Lines Matching refs:sbinfo (mm/shmem.c)
219 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_acct_block() local
224 if (sbinfo->max_blocks) { in shmem_inode_acct_block()
225 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_inode_acct_block()
226 sbinfo->max_blocks - pages) > 0) in shmem_inode_acct_block()
228 percpu_counter_add(&sbinfo->used_blocks, pages); in shmem_inode_acct_block()
241 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_unacct_blocks() local
243 if (sbinfo->max_blocks) in shmem_inode_unacct_blocks()
244 percpu_counter_sub(&sbinfo->used_blocks, pages); in shmem_inode_unacct_blocks()
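
The two functions above form a reserve/release pair: shmem_inode_acct_block() refuses the request when committing would push used_blocks past max_blocks (a max_blocks of 0 means "no limit"), and shmem_inode_unacct_blocks() returns the pages. A minimal userspace sketch of that check, with a plain counter under a pthread mutex standing in for the kernel's percpu_counter (all names below are hypothetical):

#include <pthread.h>
#include <stdbool.h>

/* Userspace model of shmem's per-superblock block accounting.
 * A plain long under a mutex stands in for the kernel's
 * percpu_counter; max_blocks == 0 means "no limit", as in shmem. */
struct sb_acct {
	pthread_mutex_t lock;
	long max_blocks;	/* 0 => unlimited */
	long used_blocks;
};

static bool acct_blocks(struct sb_acct *a, long pages)
{
	bool ok = true;

	pthread_mutex_lock(&a->lock);
	if (a->max_blocks) {
		/* reject before committing, mirroring the
		 * percpu_counter_compare() test above */
		if (a->used_blocks + pages > a->max_blocks)
			ok = false;
		else
			a->used_blocks += pages;
	}
	pthread_mutex_unlock(&a->lock);
	return ok;
}

static void unacct_blocks(struct sb_acct *a, long pages)
{
	pthread_mutex_lock(&a->lock);
	if (a->max_blocks)
		a->used_blocks -= pages;
	pthread_mutex_unlock(&a->lock);
}

The kernel uses a percpu counter here because this check sits on the allocation fast path; the single mutex above is only for clarity.
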
277 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_reserve_inode() local
281 spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
282 if (sbinfo->max_inodes) { in shmem_reserve_inode()
283 if (!sbinfo->free_inodes) { in shmem_reserve_inode()
284 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
287 sbinfo->free_inodes--; in shmem_reserve_inode()
290 ino = sbinfo->next_ino++; in shmem_reserve_inode()
292 ino = sbinfo->next_ino++; in shmem_reserve_inode()
293 if (unlikely(!sbinfo->full_inums && in shmem_reserve_inode()
302 sbinfo->next_ino = 1; in shmem_reserve_inode()
303 ino = sbinfo->next_ino++; in shmem_reserve_inode()
307 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
322 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu()); in shmem_reserve_inode()
325 spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
326 ino = sbinfo->next_ino; in shmem_reserve_inode()
327 sbinfo->next_ino += SHMEM_INO_BATCH; in shmem_reserve_inode()
328 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
342 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_free_inode() local
343 if (sbinfo->max_inodes) { in shmem_free_inode()
344 spin_lock(&sbinfo->stat_lock); in shmem_free_inode()
345 sbinfo->free_inodes++; in shmem_free_inode()
346 spin_unlock(&sbinfo->stat_lock); in shmem_free_inode()
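
Taken together, shmem_reserve_inode() and shmem_free_inode() show two ideas: the free_inodes quota is a plain counter guarded by stat_lock, and inode numbers come from a monotonic next_ino cursor that kernel-internal mounts refill in SHMEM_INO_BATCH chunks through a per-cpu slot (lines 322-328) so the lock is taken once per batch. A simplified userspace model that collapses the two allocation paths into one, with thread-local storage standing in for per-cpu data (every name here is made up for illustration):

#include <pthread.h>
#include <stdbool.h>

#define INO_BATCH 1024	/* stands in for SHMEM_INO_BATCH */

struct ino_alloc {
	pthread_mutex_t stat_lock;
	unsigned long next_ino;
	unsigned long max_inodes;	/* 0 => unlimited */
	unsigned long free_inodes;
};

static __thread unsigned long batch_next;	/* models the per-cpu ino_batch slot */
static __thread unsigned long batch_left;

static bool reserve_ino(struct ino_alloc *ia, unsigned long *ino)
{
	pthread_mutex_lock(&ia->stat_lock);
	if (ia->max_inodes) {
		if (!ia->free_inodes) {
			pthread_mutex_unlock(&ia->stat_lock);
			return false;	/* -ENOSPC in the kernel */
		}
		ia->free_inodes--;
	}
	if (!batch_left) {
		/* refill the thread-local batch from the shared cursor */
		batch_next = ia->next_ino;
		ia->next_ino += INO_BATCH;
		batch_left = INO_BATCH;
	}
	pthread_mutex_unlock(&ia->stat_lock);

	*ino = batch_next++;
	batch_left--;
	return true;
}

static void free_ino(struct ino_alloc *ia)
{
	pthread_mutex_lock(&ia->stat_lock);
	if (ia->max_inodes)
		ia->free_inodes++;	/* the number itself is never reused */
	pthread_mutex_unlock(&ia->stat_lock);
}

Unlike the kernel, this sketch takes stat_lock on every call; the real batched path only falls back to the lock when a CPU's batch runs dry, and it serves kernel-internal mounts, which skip the quota accounting entirely.
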
521 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, in shmem_unused_huge_shrink() argument
532 if (list_empty(&sbinfo->shrinklist)) in shmem_unused_huge_shrink()
535 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
536 list_for_each_safe(pos, next, &sbinfo->shrinklist) { in shmem_unused_huge_shrink()
562 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
619 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
620 list_splice_tail(&list, &sbinfo->shrinklist); in shmem_unused_huge_shrink()
621 sbinfo->shrinklist_len -= removed; in shmem_unused_huge_shrink()
622 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
630 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_unused_huge_scan() local
632 if (!READ_ONCE(sbinfo->shrinklist_len)) in shmem_unused_huge_scan()
635 return shmem_unused_huge_shrink(sbinfo, sc, 0); in shmem_unused_huge_scan()
641 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_unused_huge_count() local
642 return READ_ONCE(sbinfo->shrinklist_len); in shmem_unused_huge_count()
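
shmem_unused_huge_count() just reports shrinklist_len through READ_ONCE(), while shmem_unused_huge_shrink() follows the classic shrinker shape: detach candidates from the spinlock-protected list, do the expensive work (splitting underused huge pages) with the lock dropped, then splice the survivors back and adjust the length. A userspace sketch of that detach/work/splice-back pattern on a singly-linked list (try_shrink and every other name here is a stand-in):

#include <pthread.h>
#include <stddef.h>

/* Userspace model of the detach/work/splice-back shape of
 * shmem_unused_huge_shrink(). try_shrink() stands in for splitting
 * a huge page: nonzero means the entry was freed, zero means it
 * must be kept. */
struct node {
	struct node *next;
};

struct shrinklist {
	pthread_mutex_t lock;
	struct node *head;
	unsigned long len;
};

static unsigned long do_shrink(struct shrinklist *sl,
			       int (*try_shrink)(struct node *),
			       unsigned long to_scan)
{
	struct node *work = NULL, *keep = NULL, *n;
	unsigned long removed = 0;

	pthread_mutex_lock(&sl->lock);
	while (sl->head && to_scan--) {		/* detach candidates */
		n = sl->head;
		sl->head = n->next;
		n->next = work;
		work = n;
	}
	pthread_mutex_unlock(&sl->lock);

	while ((n = work) != NULL) {		/* expensive part, unlocked */
		work = n->next;
		if (try_shrink(n)) {
			removed++;
		} else {
			n->next = keep;
			keep = n;
		}
	}

	pthread_mutex_lock(&sl->lock);		/* splice survivors back */
	while ((n = keep) != NULL) {
		keep = n->next;
		n->next = sl->head;
		sl->head = n;
	}
	sl->len -= removed;
	pthread_mutex_unlock(&sl->lock);
	return removed;
}

The unlocked middle phase is the point of the design: splitting a huge page can sleep, so it must not happen under the spinlock.
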
648 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, in shmem_unused_huge_shrink() argument
655 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo) in is_huge_enabled() argument
658 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) && in is_huge_enabled()
1087 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_setattr() local
1129 spin_lock(&sbinfo->shrinklist_lock); in shmem_setattr()
1136 &sbinfo->shrinklist); in shmem_setattr()
1137 sbinfo->shrinklist_len++; in shmem_setattr()
1139 spin_unlock(&sbinfo->shrinklist_lock); in shmem_setattr()
1153 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_evict_inode() local
1160 spin_lock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
1163 sbinfo->shrinklist_len--; in shmem_evict_inode()
1165 spin_unlock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
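
These two sites maintain the same shrinklist from the other side: shmem_setattr() adds an inode when a truncation leaves a partially-used huge page behind, avoiding double-insertion by checking that the inode's list link is still empty, and shmem_evict_inode() unlinks it before the inode goes away. A userspace model of that add-once/remove-on-evict discipline, on a circular doubly-linked list with a sentinel head like the kernel's list_head (nodes must start self-linked; names are illustrative):

#include <pthread.h>

struct list_node {
	struct list_node *prev, *next;
};

static inline int link_empty(const struct list_node *n)
{
	return n->next == n;	/* self-linked == on no list */
}

static void add_once(pthread_mutex_t *lock, struct list_node *head,
		     struct list_node *n, unsigned long *len)
{
	pthread_mutex_lock(lock);
	if (link_empty(n)) {	/* never add the same inode twice */
		n->next = head;
		n->prev = head->prev;
		head->prev->next = n;
		head->prev = n;
		(*len)++;
	}
	pthread_mutex_unlock(lock);
}

static void remove_on_evict(pthread_mutex_t *lock, struct list_node *n,
			    unsigned long *len)
{
	pthread_mutex_lock(lock);
	if (!link_empty(n)) {
		n->prev->next = n->next;
		n->next->prev = n->prev;
		n->next = n->prev = n;	/* back to self-linked */
		(*len)--;
	}
	pthread_mutex_unlock(lock);
}
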
1478 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) in shmem_get_sbmpol() argument
1481 if (sbinfo->mpol) { in shmem_get_sbmpol()
1482 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ in shmem_get_sbmpol()
1483 mpol = sbinfo->mpol; in shmem_get_sbmpol()
1485 spin_unlock(&sbinfo->stat_lock); in shmem_get_sbmpol()
1493 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) in shmem_get_sbmpol() argument
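
shmem_get_sbmpol() takes stat_lock before dereferencing sbinfo->mpol because a concurrent remount can replace the policy; the reference is bumped (mpol_get()) under the same lock that guards the swap, so the caller's pointer stays valid after unlock. A userspace model of that use/replace protocol, where the superblock's own pointer holds one reference (all names hypothetical):

#include <pthread.h>
#include <stdlib.h>

struct mpol {
	int refcount;		/* protected by stat_lock in this model */
};

static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mpol *sb_mpol;	/* models sbinfo->mpol, holds one ref */

static struct mpol *get_sbmpol(void)
{
	struct mpol *m = NULL;

	pthread_mutex_lock(&stat_lock);	/* prevent replace/use races */
	if (sb_mpol) {
		m = sb_mpol;
		m->refcount++;		/* models mpol_get() */
	}
	pthread_mutex_unlock(&stat_lock);
	return m;
}

static void put_mpol(struct mpol *m)	/* models mpol_put() */
{
	int gone;

	if (!m)
		return;
	pthread_mutex_lock(&stat_lock);
	gone = (--m->refcount == 0);
	pthread_mutex_unlock(&stat_lock);
	if (gone)
		free(m);
}

static void replace_sbmpol(struct mpol *newpol)	/* models remount */
{
	struct mpol *old;

	pthread_mutex_lock(&stat_lock);
	old = sb_mpol;
	sb_mpol = newpol;	/* newpol arrives holding one reference */
	pthread_mutex_unlock(&stat_lock);
	put_mpol(old);		/* drop the superblock's old reference */
}
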
1800 struct shmem_sb_info *sbinfo; in shmem_getpage_gfp() local
1819 sbinfo = SHMEM_SB(inode->i_sb); in shmem_getpage_gfp()
1867 switch (sbinfo->huge) { in shmem_getpage_gfp()
1910 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); in shmem_getpage_gfp()
1948 spin_lock(&sbinfo->shrinklist_lock); in shmem_getpage_gfp()
1955 &sbinfo->shrinklist); in shmem_getpage_gfp()
1956 sbinfo->shrinklist_len++; in shmem_getpage_gfp()
1958 spin_unlock(&sbinfo->shrinklist_lock); in shmem_getpage_gfp()
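
In shmem_getpage_gfp() the switch on sbinfo->huge (line 1867) decides whether to attempt a huge allocation; if that allocation fails, line 1910 shrinks this superblock's own shrinklist once and retries before falling back, and lines 1948-1958 register the inode on the shrinklist when the new huge page extends past i_size. A userspace sketch of the shrink-once-then-retry fallback, with aligned_alloc standing in for huge-page allocation and reclaim() for shmem_unused_huge_shrink(sbinfo, NULL, 1):

#include <stdlib.h>

#define HUGE_SZ	(2UL << 20)	/* 2 MiB, a typical huge page */
#define PAGE_SZ	(4UL << 10)

static void *alloc_huge_or_page(int try_huge,
				unsigned long (*reclaim)(void))
{
	void *p;

	if (try_huge) {
		p = aligned_alloc(HUGE_SZ, HUGE_SZ);
		if (!p && reclaim())	/* shrink our own sb, then retry */
			p = aligned_alloc(HUGE_SZ, HUGE_SZ);
		if (p)
			return p;
	}
	return aligned_alloc(PAGE_SZ, PAGE_SZ);	/* small-page fallback */
}
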
2297 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_get_inode() local
2331 shmem_get_sbmpol(sbinfo)); in shmem_get_inode()
2772 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_fallocate() local
2830 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { in shmem_fallocate()
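
The match at line 2830 is a cheap up-front test in shmem_fallocate(): a preallocation larger than the whole filesystem's block limit can never succeed, so it fails with ENOSPC before instantiating any pages. As a sketch (hypothetical names):

static int check_falloc_range(unsigned long max_blocks,
			      unsigned long start, unsigned long end)
{
	if (max_blocks && end - start > max_blocks)
		return -1;	/* -ENOSPC in the kernel */
	return 0;
}
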
2902 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); in shmem_statfs() local
2907 if (sbinfo->max_blocks) { in shmem_statfs()
2908 buf->f_blocks = sbinfo->max_blocks; in shmem_statfs()
2910 buf->f_bfree = sbinfo->max_blocks - in shmem_statfs()
2911 percpu_counter_sum(&sbinfo->used_blocks); in shmem_statfs()
2913 if (sbinfo->max_inodes) { in shmem_statfs()
2914 buf->f_files = sbinfo->max_inodes; in shmem_statfs()
2915 buf->f_ffree = sbinfo->free_inodes; in shmem_statfs()
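
shmem_statfs() derives the df numbers directly from the sbinfo fields: f_bfree is the limit minus the exact percpu_counter_sum(), and a limit of 0 means unlimited, so the corresponding statfs fields are simply left alone. A compact model (the caller is assumed to have zeroed buf first):

#include <stdint.h>

struct statfs_model {
	uint64_t f_blocks, f_bfree, f_files, f_ffree;
};

/* limits of 0 mean "unlimited": leave those fields at zero */
static void fill_statfs(struct statfs_model *buf,
			uint64_t max_blocks, uint64_t used_blocks,
			uint64_t max_inodes, uint64_t free_inodes)
{
	if (max_blocks) {
		buf->f_blocks = max_blocks;
		buf->f_bfree = max_blocks - used_blocks;
	}
	if (max_inodes) {
		buf->f_files = max_inodes;
		buf->f_ffree = free_inodes;
	}
}
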
3593 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); in shmem_reconfigure() local
3597 spin_lock(&sbinfo->stat_lock); in shmem_reconfigure()
3598 inodes = sbinfo->max_inodes - sbinfo->free_inodes; in shmem_reconfigure()
3600 if (!sbinfo->max_blocks) { in shmem_reconfigure()
3604 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_reconfigure()
3611 if (!sbinfo->max_inodes) { in shmem_reconfigure()
3622 sbinfo->next_ino > UINT_MAX) { in shmem_reconfigure()
3628 sbinfo->huge = ctx->huge; in shmem_reconfigure()
3630 sbinfo->full_inums = ctx->full_inums; in shmem_reconfigure()
3632 sbinfo->max_blocks = ctx->blocks; in shmem_reconfigure()
3634 sbinfo->max_inodes = ctx->inodes; in shmem_reconfigure()
3635 sbinfo->free_inodes = ctx->inodes - inodes; in shmem_reconfigure()
3642 mpol_put(sbinfo->mpol); in shmem_reconfigure()
3643 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ in shmem_reconfigure()
3646 spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
3649 spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
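
shmem_reconfigure() validates a remount under stat_lock before committing anything: the number of inodes in use is max_inodes - free_inodes, a new limit must cover current usage, a previously unlimited filesystem cannot be retroactively limited, and inode32 cannot be re-enabled once next_ino has passed UINT_MAX (lines 3620-3622). A userspace sketch of that validation order, simplified in that every option is always "seen", whereas the kernel only checks and commits options the remount actually passed (names hypothetical):

#include <stdbool.h>
#include <limits.h>

struct sb_limits {
	unsigned long max_blocks, used_blocks;
	unsigned long max_inodes, free_inodes;
	unsigned long long next_ino;
	bool full_inums;
};

static bool reconfigure(struct sb_limits *sb, unsigned long new_blocks,
			unsigned long new_inodes, bool new_full_inums)
{
	unsigned long in_use = sb->max_inodes - sb->free_inodes;

	if (new_blocks) {
		if (!sb->max_blocks)
			return false;	/* cannot retroactively limit size */
		if (sb->used_blocks > new_blocks)
			return false;	/* too small for current use */
	}
	if (new_inodes) {
		if (!sb->max_inodes)
			return false;	/* cannot retroactively limit inodes */
		if (in_use > new_inodes)
			return false;	/* too few inodes for current use */
	}
	/* inode32 cannot come back once numbers passed 32 bits */
	if (!new_full_inums && sb->next_ino > UINT_MAX)
		return false;

	sb->full_inums = new_full_inums;
	sb->max_blocks = new_blocks;
	sb->max_inodes = new_inodes;
	sb->free_inodes = new_inodes ? new_inodes - in_use : 0;
	return true;
}

All checks run before any field is written, so a rejected remount leaves the superblock exactly as it was.
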
3655 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); in shmem_show_options() local
3657 if (sbinfo->max_blocks != shmem_default_max_blocks()) in shmem_show_options()
3659 sbinfo->max_blocks << (PAGE_SHIFT - 10)); in shmem_show_options()
3660 if (sbinfo->max_inodes != shmem_default_max_inodes()) in shmem_show_options()
3661 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); in shmem_show_options()
3662 if (sbinfo->mode != (0777 | S_ISVTX)) in shmem_show_options()
3663 seq_printf(seq, ",mode=%03ho", sbinfo->mode); in shmem_show_options()
3664 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) in shmem_show_options()
3666 from_kuid_munged(&init_user_ns, sbinfo->uid)); in shmem_show_options()
3667 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) in shmem_show_options()
3669 from_kgid_munged(&init_user_ns, sbinfo->gid)); in shmem_show_options()
3691 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) in shmem_show_options()
3692 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32)); in shmem_show_options()
3695 if (sbinfo->huge) in shmem_show_options()
3696 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); in shmem_show_options()
3698 shmem_show_mpol(seq, sbinfo->mpol); in shmem_show_options()
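
shmem_show_options() keeps /proc/mounts terse by printing an option only when it differs from the compiled-in default. A userspace model of that differs-from-default rule, assuming 4 KiB pages so the size conversion PAGE_SHIFT - 10 comes out to 2 (defaults and names are illustrative):

#include <stdio.h>

static void show_options(FILE *seq,
			 unsigned long max_blocks, unsigned long def_blocks,
			 unsigned long max_inodes, unsigned long def_inodes,
			 unsigned int mode)
{
	if (max_blocks != def_blocks)
		fprintf(seq, ",size=%luk", max_blocks << 2);
	if (max_inodes != def_inodes)
		fprintf(seq, ",nr_inodes=%lu", max_inodes);
	if (mode != (0777 | 01000))	/* 01000 is S_ISVTX, the default */
		fprintf(seq, ",mode=%03o", mode);
}
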
3706 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_put_super() local
3708 free_percpu(sbinfo->ino_batch); in shmem_put_super()
3709 percpu_counter_destroy(&sbinfo->used_blocks); in shmem_put_super()
3710 mpol_put(sbinfo->mpol); in shmem_put_super()
3711 kfree(sbinfo); in shmem_put_super()
3719 struct shmem_sb_info *sbinfo; in shmem_fill_super() local
3723 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), in shmem_fill_super()
3725 if (!sbinfo) in shmem_fill_super()
3728 sb->s_fs_info = sbinfo; in shmem_fill_super()
3751 sbinfo->max_blocks = ctx->blocks; in shmem_fill_super()
3752 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; in shmem_fill_super()
3754 sbinfo->ino_batch = alloc_percpu(ino_t); in shmem_fill_super()
3755 if (!sbinfo->ino_batch) in shmem_fill_super()
3758 sbinfo->uid = ctx->uid; in shmem_fill_super()
3759 sbinfo->gid = ctx->gid; in shmem_fill_super()
3760 sbinfo->full_inums = ctx->full_inums; in shmem_fill_super()
3761 sbinfo->mode = ctx->mode; in shmem_fill_super()
3762 sbinfo->huge = ctx->huge; in shmem_fill_super()
3763 sbinfo->mpol = ctx->mpol; in shmem_fill_super()
3766 spin_lock_init(&sbinfo->stat_lock); in shmem_fill_super()
3767 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) in shmem_fill_super()
3769 spin_lock_init(&sbinfo->shrinklist_lock); in shmem_fill_super()
3770 INIT_LIST_HEAD(&sbinfo->shrinklist); in shmem_fill_super()
3786 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); in shmem_fill_super()
3789 inode->i_uid = sbinfo->uid; in shmem_fill_super()
3790 inode->i_gid = sbinfo->gid; in shmem_fill_super()
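
shmem_fill_super() allocates the zeroed sbinfo, copies the parsed limits out of the fs_context (free_inodes starts equal to max_inodes), initializes both locks and the shrinklist, and hands ownership to shmem_put_super() (lines 3706-3711), which tears everything down again. A much-reduced userspace model of that pairing; the kernel additionally sets up ino_batch, the used_blocks percpu counter, and the mempolicy, all names below are invented:

#include <pthread.h>
#include <stdlib.h>

struct ctx_model { unsigned long blocks, inodes; unsigned int mode; };

struct sbinfo_model {
	pthread_mutex_t stat_lock;
	unsigned long max_blocks;
	unsigned long max_inodes, free_inodes;
	unsigned int mode;
};

static struct sbinfo_model *fill_super(const struct ctx_model *ctx)
{
	struct sbinfo_model *s = calloc(1, sizeof(*s));	/* kzalloc() */

	if (!s)
		return NULL;
	s->max_blocks = ctx->blocks;
	s->free_inodes = s->max_inodes = ctx->inodes;	/* all free at mount */
	s->mode = ctx->mode;
	pthread_mutex_init(&s->stat_lock, NULL);
	return s;
}

static void put_super(struct sbinfo_model *s)
{
	pthread_mutex_destroy(&s->stat_lock);
	free(s);
}
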
4083 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_huge_enabled() local
4094 switch (sbinfo->huge) { in shmem_huge_enabled()
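
shmem_huge_enabled() and the fault path at line 1867 dispatch on the same per-superblock policy set by the huge= mount option: never, always, within_size, or advise. A userspace model of the decision; the enumerators mirror the order of the kernel's SHMEM_HUGE_* constants and the two boolean predicates are stand-ins for the real tests:

#include <stdbool.h>

enum { HUGE_NEVER, HUGE_ALWAYS, HUGE_WITHIN_SIZE, HUGE_ADVISE };

static bool want_huge_page(int policy, bool within_i_size, bool vma_advised)
{
	switch (policy) {
	case HUGE_ALWAYS:
		return true;
	case HUGE_WITHIN_SIZE:
		/* huge only while the faulting index fits below i_size
		 * rounded up to a huge-page boundary */
		return within_i_size;
	case HUGE_ADVISE:
		return vma_advised;	/* opt-in via madvise(MADV_HUGEPAGE) */
	case HUGE_NEVER:
	default:
		return false;
	}
}
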