Lines matching references to ci (the per-inode struct ceph_inode_info pointer) in fs/ceph/inode.c

79 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_get_snapdir() local
89 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ in ceph_get_snapdir()
90 ci->i_rbytes = 0; in ceph_get_snapdir()
115 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, in __get_or_create_frag() argument
123 p = &ci->i_fragtree.rb_node; in __get_or_create_frag()
146 rb_insert_color(&frag->node, &ci->i_fragtree); in __get_or_create_frag()
149 ceph_vinop(&ci->vfs_inode), f); in __get_or_create_frag()
156 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f) in __ceph_find_frag() argument
158 struct rb_node *n = ci->i_fragtree.rb_node; in __ceph_find_frag()
179 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v, in __ceph_choose_frag() argument
192 frag = __ceph_find_frag(ci, t); in __ceph_choose_frag()
221 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, in ceph_choose_frag() argument
225 mutex_lock(&ci->i_fragtree_mutex); in ceph_choose_frag()
226 ret = __ceph_choose_frag(ci, v, pfrag, found); in ceph_choose_frag()
227 mutex_unlock(&ci->i_fragtree_mutex); in ceph_choose_frag()
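
The fragtree lookups above follow the usual locked/unlocked split: __ceph_find_frag() and __ceph_choose_frag() expect ci->i_fragtree_mutex to be held, while ceph_choose_frag() is the wrapper that takes the mutex itself. A minimal caller sketch, assuming the fs/ceph internal headers; example_pick_frag() and hashval are hypothetical, the ceph_* calls are the ones listed above:

    /* Map a name hash onto the directory fragment that covers it.
     * ceph_choose_frag() takes ci->i_fragtree_mutex internally, so the
     * caller needs no fragtree locking of its own. */
    static u32 example_pick_frag(struct inode *dir, u32 hashval)
    {
            struct ceph_inode_info *ci = ceph_inode(dir);

            return ceph_choose_frag(ci, hashval, NULL, NULL);
    }
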
239 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fill_dirfrag() local
248 spin_lock(&ci->i_ceph_lock); in ceph_fill_dirfrag()
249 if (ci->i_auth_cap) in ceph_fill_dirfrag()
250 diri_auth = ci->i_auth_cap->mds; in ceph_fill_dirfrag()
251 spin_unlock(&ci->i_ceph_lock); in ceph_fill_dirfrag()
256 mutex_lock(&ci->i_fragtree_mutex); in ceph_fill_dirfrag()
259 frag = __ceph_find_frag(ci, id); in ceph_fill_dirfrag()
266 rb_erase(&frag->node, &ci->i_fragtree); in ceph_fill_dirfrag()
280 frag = __get_or_create_frag(ci, id); in ceph_fill_dirfrag()
298 mutex_unlock(&ci->i_fragtree_mutex); in ceph_fill_dirfrag()
323 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fill_fragtree() local
330 mutex_lock(&ci->i_fragtree_mutex); in ceph_fill_fragtree()
332 if (nsplits != ci->i_fragtree_nsplits) { in ceph_fill_fragtree()
337 if (!__ceph_find_frag(ci, id)) in ceph_fill_fragtree()
339 } else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) { in ceph_fill_fragtree()
340 rb_node = rb_first(&ci->i_fragtree); in ceph_fill_fragtree()
347 if (id != __ceph_choose_frag(ci, id, NULL, NULL)) in ceph_fill_fragtree()
359 rb_node = rb_first(&ci->i_fragtree); in ceph_fill_fragtree()
383 rb_erase(&frag->node, &ci->i_fragtree); in ceph_fill_fragtree()
385 ci->i_fragtree_nsplits--; in ceph_fill_fragtree()
391 frag = __get_or_create_frag(ci, id); in ceph_fill_fragtree()
396 ci->i_fragtree_nsplits++; in ceph_fill_fragtree()
407 rb_erase(&frag->node, &ci->i_fragtree); in ceph_fill_fragtree()
409 ci->i_fragtree_nsplits--; in ceph_fill_fragtree()
414 mutex_unlock(&ci->i_fragtree_mutex); in ceph_fill_fragtree()
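
ceph_fill_dirfrag() and ceph_fill_fragtree() above maintain ci->i_fragtree, an rbtree of struct ceph_inode_frag keyed by fragment id, always under ci->i_fragtree_mutex. A traversal sketch under the same assumption; example_dump_fragtree() is a hypothetical helper and the frag/split_by/mds field names are taken from struct ceph_inode_frag as defined in fs/ceph/super.h:

    /* Walk every cached fragment of a directory inode.  The fragtree is an
     * rbtree rooted at ci->i_fragtree and is only read or modified while
     * ci->i_fragtree_mutex is held. */
    static void example_dump_fragtree(struct ceph_inode_info *ci)
    {
            struct rb_node *n;

            mutex_lock(&ci->i_fragtree_mutex);
            for (n = rb_first(&ci->i_fragtree); n; n = rb_next(n)) {
                    struct ceph_inode_frag *frag =
                            rb_entry(n, struct ceph_inode_frag, node);

                    pr_debug("frag %x split_by %d mds %d\n",
                             frag->frag, frag->split_by, frag->mds);
            }
            mutex_unlock(&ci->i_fragtree_mutex);
    }
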
423 struct ceph_inode_info *ci; in ceph_alloc_inode() local
426 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS); in ceph_alloc_inode()
427 if (!ci) in ceph_alloc_inode()
430 dout("alloc_inode %p\n", &ci->vfs_inode); in ceph_alloc_inode()
432 spin_lock_init(&ci->i_ceph_lock); in ceph_alloc_inode()
434 ci->i_version = 0; in ceph_alloc_inode()
435 ci->i_inline_version = 0; in ceph_alloc_inode()
436 ci->i_time_warp_seq = 0; in ceph_alloc_inode()
437 ci->i_ceph_flags = 0; in ceph_alloc_inode()
438 atomic64_set(&ci->i_ordered_count, 1); in ceph_alloc_inode()
439 atomic64_set(&ci->i_release_count, 1); in ceph_alloc_inode()
440 atomic64_set(&ci->i_complete_seq[0], 0); in ceph_alloc_inode()
441 atomic64_set(&ci->i_complete_seq[1], 0); in ceph_alloc_inode()
442 ci->i_symlink = NULL; in ceph_alloc_inode()
444 ci->i_max_bytes = 0; in ceph_alloc_inode()
445 ci->i_max_files = 0; in ceph_alloc_inode()
447 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout)); in ceph_alloc_inode()
448 RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL); in ceph_alloc_inode()
450 ci->i_fragtree = RB_ROOT; in ceph_alloc_inode()
451 mutex_init(&ci->i_fragtree_mutex); in ceph_alloc_inode()
453 ci->i_xattrs.blob = NULL; in ceph_alloc_inode()
454 ci->i_xattrs.prealloc_blob = NULL; in ceph_alloc_inode()
455 ci->i_xattrs.dirty = false; in ceph_alloc_inode()
456 ci->i_xattrs.index = RB_ROOT; in ceph_alloc_inode()
457 ci->i_xattrs.count = 0; in ceph_alloc_inode()
458 ci->i_xattrs.names_size = 0; in ceph_alloc_inode()
459 ci->i_xattrs.vals_size = 0; in ceph_alloc_inode()
460 ci->i_xattrs.version = 0; in ceph_alloc_inode()
461 ci->i_xattrs.index_version = 0; in ceph_alloc_inode()
463 ci->i_caps = RB_ROOT; in ceph_alloc_inode()
464 ci->i_auth_cap = NULL; in ceph_alloc_inode()
465 ci->i_dirty_caps = 0; in ceph_alloc_inode()
466 ci->i_flushing_caps = 0; in ceph_alloc_inode()
467 INIT_LIST_HEAD(&ci->i_dirty_item); in ceph_alloc_inode()
468 INIT_LIST_HEAD(&ci->i_flushing_item); in ceph_alloc_inode()
469 ci->i_prealloc_cap_flush = NULL; in ceph_alloc_inode()
470 INIT_LIST_HEAD(&ci->i_cap_flush_list); in ceph_alloc_inode()
471 init_waitqueue_head(&ci->i_cap_wq); in ceph_alloc_inode()
472 ci->i_hold_caps_min = 0; in ceph_alloc_inode()
473 ci->i_hold_caps_max = 0; in ceph_alloc_inode()
474 INIT_LIST_HEAD(&ci->i_cap_delay_list); in ceph_alloc_inode()
475 INIT_LIST_HEAD(&ci->i_cap_snaps); in ceph_alloc_inode()
476 ci->i_head_snapc = NULL; in ceph_alloc_inode()
477 ci->i_snap_caps = 0; in ceph_alloc_inode()
480 ci->i_nr_by_mode[i] = 0; in ceph_alloc_inode()
482 mutex_init(&ci->i_truncate_mutex); in ceph_alloc_inode()
483 ci->i_truncate_seq = 0; in ceph_alloc_inode()
484 ci->i_truncate_size = 0; in ceph_alloc_inode()
485 ci->i_truncate_pending = 0; in ceph_alloc_inode()
487 ci->i_max_size = 0; in ceph_alloc_inode()
488 ci->i_reported_size = 0; in ceph_alloc_inode()
489 ci->i_wanted_max_size = 0; in ceph_alloc_inode()
490 ci->i_requested_max_size = 0; in ceph_alloc_inode()
492 ci->i_pin_ref = 0; in ceph_alloc_inode()
493 ci->i_rd_ref = 0; in ceph_alloc_inode()
494 ci->i_rdcache_ref = 0; in ceph_alloc_inode()
495 ci->i_wr_ref = 0; in ceph_alloc_inode()
496 ci->i_wb_ref = 0; in ceph_alloc_inode()
497 ci->i_wrbuffer_ref = 0; in ceph_alloc_inode()
498 ci->i_wrbuffer_ref_head = 0; in ceph_alloc_inode()
499 atomic_set(&ci->i_filelock_ref, 0); in ceph_alloc_inode()
500 atomic_set(&ci->i_shared_gen, 0); in ceph_alloc_inode()
501 ci->i_rdcache_gen = 0; in ceph_alloc_inode()
502 ci->i_rdcache_revoking = 0; in ceph_alloc_inode()
504 INIT_LIST_HEAD(&ci->i_unsafe_dirops); in ceph_alloc_inode()
505 INIT_LIST_HEAD(&ci->i_unsafe_iops); in ceph_alloc_inode()
506 spin_lock_init(&ci->i_unsafe_lock); in ceph_alloc_inode()
508 ci->i_snap_realm = NULL; in ceph_alloc_inode()
509 INIT_LIST_HEAD(&ci->i_snap_realm_item); in ceph_alloc_inode()
510 INIT_LIST_HEAD(&ci->i_snap_flush_item); in ceph_alloc_inode()
512 INIT_WORK(&ci->i_wb_work, ceph_writeback_work); in ceph_alloc_inode()
513 INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work); in ceph_alloc_inode()
515 INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work); in ceph_alloc_inode()
517 ceph_fscache_inode_init(ci); in ceph_alloc_inode()
519 return &ci->vfs_inode; in ceph_alloc_inode()
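
ceph_alloc_inode() above returns the embedded vfs_inode to the VFS, which is why every function in this listing can recover its ceph_inode_info with ceph_inode(). That accessor is the standard container_of() idiom; a sketch of the equivalent (example_ceph_inode() is a stand-in name, the real helper lives in fs/ceph/super.h):

    /* Recover the containing ceph_inode_info from the embedded VFS inode;
     * this mirrors the real ceph_inode() helper in fs/ceph/super.h. */
    static inline struct ceph_inode_info *example_ceph_inode(struct inode *inode)
    {
            return container_of(inode, struct ceph_inode_info, vfs_inode);
    }
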
525 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_i_callback() local
527 kmem_cache_free(ceph_inode_cachep, ci); in ceph_i_callback()
532 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_destroy_inode() local
538 ceph_fscache_unregister_inode_cookie(ci); in ceph_destroy_inode()
542 if (__ceph_has_any_quota(ci)) in ceph_destroy_inode()
549 if (ci->i_snap_realm) { in ceph_destroy_inode()
551 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; in ceph_destroy_inode()
552 struct ceph_snap_realm *realm = ci->i_snap_realm; in ceph_destroy_inode()
556 list_del_init(&ci->i_snap_realm_item); in ceph_destroy_inode()
557 ci->i_snap_realm = NULL; in ceph_destroy_inode()
558 if (realm->ino == ci->i_vino.ino) in ceph_destroy_inode()
564 kfree(ci->i_symlink); in ceph_destroy_inode()
565 while ((n = rb_first(&ci->i_fragtree)) != NULL) { in ceph_destroy_inode()
567 rb_erase(n, &ci->i_fragtree); in ceph_destroy_inode()
570 ci->i_fragtree_nsplits = 0; in ceph_destroy_inode()
572 __ceph_destroy_xattrs(ci); in ceph_destroy_inode()
573 if (ci->i_xattrs.blob) in ceph_destroy_inode()
574 ceph_buffer_put(ci->i_xattrs.blob); in ceph_destroy_inode()
575 if (ci->i_xattrs.prealloc_blob) in ceph_destroy_inode()
576 ceph_buffer_put(ci->i_xattrs.prealloc_blob); in ceph_destroy_inode()
578 ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns)); in ceph_destroy_inode()
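
ceph_destroy_inode() above drains the fragtree by repeatedly popping the first node. The same pattern in isolation, with example_free_fragtree() as a hypothetical name; no fragtree lock is taken, on the assumption that the inode is already unreachable:

    /* Drain the fragtree by popping the first node until the tree is empty.
     * No locking is needed because the inode is already being torn down. */
    static void example_free_fragtree(struct ceph_inode_info *ci)
    {
            struct rb_node *n;

            while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                    struct ceph_inode_frag *frag =
                            rb_entry(n, struct ceph_inode_frag, node);

                    rb_erase(n, &ci->i_fragtree);
                    kfree(frag);
            }
            ci->i_fragtree_nsplits = 0;
    }
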
609 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fill_file_size() local
612 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || in ceph_fill_file_size()
613 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) { in ceph_fill_file_size()
621 ci->i_reported_size = size; in ceph_fill_file_size()
622 if (truncate_seq != ci->i_truncate_seq) { in ceph_fill_file_size()
624 ci->i_truncate_seq, truncate_seq); in ceph_fill_file_size()
625 ci->i_truncate_seq = truncate_seq; in ceph_fill_file_size()
641 __ceph_caps_file_wanted(ci)) { in ceph_fill_file_size()
642 ci->i_truncate_pending++; in ceph_fill_file_size()
647 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 && in ceph_fill_file_size()
648 ci->i_truncate_size != truncate_size) { in ceph_fill_file_size()
649 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size, in ceph_fill_file_size()
651 ci->i_truncate_size = truncate_size; in ceph_fill_file_size()
664 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fill_file_time() local
672 if (ci->i_version == 0 || in ceph_fill_file_time()
679 if (ci->i_version == 0 || in ceph_fill_file_time()
680 ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { in ceph_fill_file_time()
686 ci->i_time_warp_seq, (int)time_warp_seq); in ceph_fill_file_time()
690 ci->i_time_warp_seq = time_warp_seq; in ceph_fill_file_time()
691 } else if (time_warp_seq == ci->i_time_warp_seq) { in ceph_fill_file_time()
714 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { in ceph_fill_file_time()
718 ci->i_time_warp_seq = time_warp_seq; in ceph_fill_file_time()
725 inode, time_warp_seq, ci->i_time_warp_seq); in ceph_fill_file_time()
741 struct ceph_inode_info *ci = ceph_inode(inode); in fill_inode() local
755 ci->i_version); in fill_inode()
779 spin_lock(&ci->i_ceph_lock); in fill_inode()
792 if (ci->i_version == 0 || in fill_inode()
794 le64_to_cpu(info->version) > (ci->i_version & ~1))) in fill_inode()
797 __ceph_caps_issued(ci, &issued); in fill_inode()
798 issued |= __ceph_caps_dirty(ci); in fill_inode()
805 __ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files); in fill_inode()
832 ci->i_files = le64_to_cpu(info->files); in fill_inode()
833 ci->i_subdirs = le64_to_cpu(info->subdirs); in fill_inode()
838 s64 old_pool = ci->i_layout.pool_id; in fill_inode()
841 ceph_file_layout_from_legacy(&ci->i_layout, &info->layout); in fill_inode()
842 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns, in fill_inode()
843 lockdep_is_held(&ci->i_ceph_lock)); in fill_inode()
844 rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns); in fill_inode()
846 if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns) in fill_inode()
847 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM; in fill_inode()
857 ci->i_max_size != le64_to_cpu(info->max_size)) { in fill_inode()
858 dout("max_size %lld -> %llu\n", ci->i_max_size, in fill_inode()
860 ci->i_max_size = le64_to_cpu(info->max_size); in fill_inode()
868 ci->i_dir_layout = iinfo->dir_layout; in fill_inode()
869 ci->i_rbytes = le64_to_cpu(info->rbytes); in fill_inode()
870 ci->i_rfiles = le64_to_cpu(info->rfiles); in fill_inode()
871 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs); in fill_inode()
872 ceph_decode_timespec64(&ci->i_rctime, &info->rctime); in fill_inode()
878 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) && in fill_inode()
879 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { in fill_inode()
880 if (ci->i_xattrs.blob) in fill_inode()
881 ceph_buffer_put(ci->i_xattrs.blob); in fill_inode()
882 ci->i_xattrs.blob = xattr_blob; in fill_inode()
884 memcpy(ci->i_xattrs.blob->vec.iov_base, in fill_inode()
886 ci->i_xattrs.version = le64_to_cpu(info->xattr_version); in fill_inode()
892 if (le64_to_cpu(info->version) > ci->i_version) in fill_inode()
893 ci->i_version = le64_to_cpu(info->version); in fill_inode()
911 if (!ci->i_symlink) { in fill_inode()
915 spin_unlock(&ci->i_ceph_lock); in fill_inode()
930 spin_lock(&ci->i_ceph_lock); in fill_inode()
931 if (!ci->i_symlink) in fill_inode()
932 ci->i_symlink = sym; in fill_inode()
936 inode->i_link = ci->i_symlink; in fill_inode()
961 ci->i_files == 0 && ci->i_subdirs == 0 && in fill_inode()
964 !__ceph_dir_is_complete(ci)) { in fill_inode()
967 __ceph_dir_set_complete(ci, in fill_inode()
968 atomic64_read(&ci->i_release_count), in fill_inode()
969 atomic64_read(&ci->i_ordered_count)); in fill_inode()
976 ci->i_snap_caps |= info_caps; in fill_inode()
978 __ceph_get_fmode(ci, cap_fmode); in fill_inode()
983 __ceph_get_fmode(ci, cap_fmode); in fill_inode()
987 iinfo->inline_version >= ci->i_inline_version) { in fill_inode()
989 ci->i_inline_version = iinfo->inline_version; in fill_inode()
990 if (ci->i_inline_version != CEPH_INLINE_NONE && in fill_inode()
995 spin_unlock(&ci->i_ceph_lock); in fill_inode()
1002 wake_up_all(&ci->i_cap_wq); in fill_inode()
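
fill_inode() above shows the central locking rule for this structure: the ci->i_* metadata fields are protected by ci->i_ceph_lock, and whether MDS-supplied values may overwrite them depends on the capabilities currently issued or dirty. A reduced sketch of that gate; example_update_under_ceph_lock(), new_rbytes and the specific CEPH_CAP_FILE_EXCL test are illustrative only:

    /* Update a metadata field under i_ceph_lock, gated on the currently
     * issued/dirty capabilities.  The CEPH_CAP_FILE_EXCL test is only a
     * placeholder for whichever cap actually guards the field. */
    static void example_update_under_ceph_lock(struct inode *inode, u64 new_rbytes)
    {
            struct ceph_inode_info *ci = ceph_inode(inode);
            int issued;

            spin_lock(&ci->i_ceph_lock);
            issued = __ceph_caps_issued(ci, NULL);
            issued |= __ceph_caps_dirty(ci);
            if (!(issued & CEPH_CAP_FILE_EXCL))
                    ci->i_rbytes = new_rbytes;
            spin_unlock(&ci->i_ceph_lock);
    }
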
1479 struct ceph_inode_info *ci = ceph_inode(dir); in fill_readdir_cache() local
1502 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && in fill_readdir_cache()
1503 req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) { in fill_readdir_cache()
1518 struct ceph_inode_info *ci = ceph_inode(d_inode(parent)); in ceph_readdir_prepopulate() local
1535 last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, in ceph_readdir_prepopulate()
1570 atomic64_read(&ci->i_release_count); in ceph_readdir_prepopulate()
1572 atomic64_read(&ci->i_ordered_count); in ceph_readdir_prepopulate()
1593 u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, in ceph_readdir_prepopulate()
1628 atomic_read(&ci->i_shared_gen)) { in ceph_readdir_prepopulate()
1629 __ceph_dir_clear_ordered(ci); in ceph_readdir_prepopulate()
1714 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_inode_set_size() local
1717 spin_lock(&ci->i_ceph_lock); in ceph_inode_set_size()
1722 ret = __ceph_should_report_size(ci); in ceph_inode_set_size()
1724 spin_unlock(&ci->i_ceph_lock); in ceph_inode_set_size()
1746 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, in ceph_writeback_work() local
1748 struct inode *inode = &ci->vfs_inode; in ceph_writeback_work()
1776 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, in ceph_invalidate_work() local
1778 struct inode *inode = &ci->vfs_inode; in ceph_invalidate_work()
1783 mutex_lock(&ci->i_truncate_mutex); in ceph_invalidate_work()
1790 mutex_unlock(&ci->i_truncate_mutex); in ceph_invalidate_work()
1794 spin_lock(&ci->i_ceph_lock); in ceph_invalidate_work()
1796 ci->i_rdcache_gen, ci->i_rdcache_revoking); in ceph_invalidate_work()
1797 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { in ceph_invalidate_work()
1798 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) in ceph_invalidate_work()
1800 spin_unlock(&ci->i_ceph_lock); in ceph_invalidate_work()
1801 mutex_unlock(&ci->i_truncate_mutex); in ceph_invalidate_work()
1804 orig_gen = ci->i_rdcache_gen; in ceph_invalidate_work()
1805 spin_unlock(&ci->i_ceph_lock); in ceph_invalidate_work()
1811 spin_lock(&ci->i_ceph_lock); in ceph_invalidate_work()
1812 if (orig_gen == ci->i_rdcache_gen && in ceph_invalidate_work()
1813 orig_gen == ci->i_rdcache_revoking) { in ceph_invalidate_work()
1815 ci->i_rdcache_gen); in ceph_invalidate_work()
1816 ci->i_rdcache_revoking--; in ceph_invalidate_work()
1820 inode, orig_gen, ci->i_rdcache_gen, in ceph_invalidate_work()
1821 ci->i_rdcache_revoking); in ceph_invalidate_work()
1822 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) in ceph_invalidate_work()
1825 spin_unlock(&ci->i_ceph_lock); in ceph_invalidate_work()
1826 mutex_unlock(&ci->i_truncate_mutex); in ceph_invalidate_work()
1829 ceph_check_caps(ci, 0, NULL); in ceph_invalidate_work()
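
ceph_invalidate_work() above treats i_rdcache_gen/i_rdcache_revoking as a generation check around page-cache invalidation. A stripped-down sketch of that check; the i_truncate_mutex serialization, error handling and the CEPH_CAP_FILE_CACHE revocation rechecks are omitted, and example_invalidate_pages() is a hypothetical name:

    /* Sample the read-cache generation, invalidate the page cache without
     * the spinlock held, and only count the invalidation if the generation
     * did not move in the meantime. */
    static void example_invalidate_pages(struct inode *inode)
    {
            struct ceph_inode_info *ci = ceph_inode(inode);
            u32 orig_gen;

            spin_lock(&ci->i_ceph_lock);
            orig_gen = ci->i_rdcache_gen;
            spin_unlock(&ci->i_ceph_lock);

            invalidate_inode_pages2(inode->i_mapping);

            spin_lock(&ci->i_ceph_lock);
            if (orig_gen == ci->i_rdcache_gen &&
                orig_gen == ci->i_rdcache_revoking)
                    ci->i_rdcache_revoking--;       /* invalidation took effect */
            spin_unlock(&ci->i_ceph_lock);
    }
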
1841 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, in ceph_vmtruncate_work() local
1843 struct inode *inode = &ci->vfs_inode; in ceph_vmtruncate_work()
1856 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_queue_vmtruncate() local
1861 &ci->i_vmtruncate_work)) { in ceph_queue_vmtruncate()
1865 inode, ci->i_truncate_pending); in ceph_queue_vmtruncate()
1876 struct ceph_inode_info *ci = ceph_inode(inode); in __ceph_do_pending_vmtruncate() local
1880 mutex_lock(&ci->i_truncate_mutex); in __ceph_do_pending_vmtruncate()
1882 spin_lock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1883 if (ci->i_truncate_pending == 0) { in __ceph_do_pending_vmtruncate()
1885 spin_unlock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1886 mutex_unlock(&ci->i_truncate_mutex); in __ceph_do_pending_vmtruncate()
1894 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { in __ceph_do_pending_vmtruncate()
1895 spin_unlock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1904 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref); in __ceph_do_pending_vmtruncate()
1906 to = ci->i_truncate_size; in __ceph_do_pending_vmtruncate()
1907 wrbuffer_refs = ci->i_wrbuffer_ref; in __ceph_do_pending_vmtruncate()
1909 ci->i_truncate_pending, to); in __ceph_do_pending_vmtruncate()
1910 spin_unlock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1914 spin_lock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1915 if (to == ci->i_truncate_size) { in __ceph_do_pending_vmtruncate()
1916 ci->i_truncate_pending = 0; in __ceph_do_pending_vmtruncate()
1919 spin_unlock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1923 mutex_unlock(&ci->i_truncate_mutex); in __ceph_do_pending_vmtruncate()
1926 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); in __ceph_do_pending_vmtruncate()
1928 wake_up_all(&ci->i_cap_wq); in __ceph_do_pending_vmtruncate()
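
__ceph_do_pending_vmtruncate() above samples the target size under i_ceph_lock, truncates the page cache with only i_truncate_mutex held, and retries if i_truncate_size moved in the meantime. A simplified sketch of that retry shape; the write-buffer reference and cap-checking details are dropped, and example_do_pending_truncate() is a hypothetical name:

    /* Truncate the page cache to the pending size and retry if the target
     * moved while the spinlock was dropped. */
    static void example_do_pending_truncate(struct inode *inode)
    {
            struct ceph_inode_info *ci = ceph_inode(inode);
            bool again;
            u64 to;

            mutex_lock(&ci->i_truncate_mutex);
    retry:
            spin_lock(&ci->i_ceph_lock);
            if (ci->i_truncate_pending == 0) {
                    spin_unlock(&ci->i_ceph_lock);
                    mutex_unlock(&ci->i_truncate_mutex);
                    return;
            }
            to = ci->i_truncate_size;
            spin_unlock(&ci->i_ceph_lock);

            truncate_pagecache(inode, to);

            spin_lock(&ci->i_ceph_lock);
            again = (to != ci->i_truncate_size);
            if (!again)
                    ci->i_truncate_pending = 0;
            spin_unlock(&ci->i_ceph_lock);
            if (again)
                    goto retry;

            mutex_unlock(&ci->i_truncate_mutex);
            wake_up_all(&ci->i_cap_wq);
    }
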
1943 struct ceph_inode_info *ci = ceph_inode(inode); in __ceph_setattr() local
1966 spin_lock(&ci->i_ceph_lock); in __ceph_setattr()
1967 issued = __ceph_caps_issued(ci, NULL); in __ceph_setattr()
1969 if (!ci->i_head_snapc && in __ceph_setattr()
1973 spin_unlock(&ci->i_ceph_lock); in __ceph_setattr()
1975 spin_lock(&ci->i_ceph_lock); in __ceph_setattr()
1976 issued = __ceph_caps_issued(ci, NULL); in __ceph_setattr()
2032 ci->i_time_warp_seq++; in __ceph_setattr()
2054 ci->i_time_warp_seq++; in __ceph_setattr()
2078 ci->i_reported_size = attr->ia_size; in __ceph_setattr()
2119 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, in __ceph_setattr()
2125 spin_unlock(&ci->i_ceph_lock); in __ceph_setattr()
2262 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_getattr() local
2276 stat->size = ci->i_rbytes; in ceph_getattr()
2278 stat->size = ci->i_files + ci->i_subdirs; in ceph_getattr()
2288 stat->nlink = 1 + 1 + ci->i_subdirs; in ceph_getattr()
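
ceph_getattr() above synthesizes directory stat fields from the recursive statistics kept in ceph_inode_info. A sketch of just that part; example_fill_dir_stat() and the use_rbytes flag are stand-ins for the real code's mount-option check:

    /* Report a directory's "size" as either the recursive byte count or the
     * number of entries, and derive nlink from the subdirectory count. */
    static void example_fill_dir_stat(struct inode *dir, struct kstat *stat,
                                      bool use_rbytes)
    {
            struct ceph_inode_info *ci = ceph_inode(dir);

            if (use_rbytes)
                    stat->size = ci->i_rbytes;
            else
                    stat->size = ci->i_files + ci->i_subdirs;
            /* "." and "..", plus one link per subdirectory */
            stat->nlink = 1 + 1 + ci->i_subdirs;
    }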