Lines Matching refs:sbinfo in mm/shmem.c
(each entry gives the source line number, the matching line, the enclosing function, and whether sbinfo is a local or an argument there)

196 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_acct_block() local
201 if (sbinfo->max_blocks) { in shmem_inode_acct_block()
202 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_inode_acct_block()
203 sbinfo->max_blocks - pages) > 0) in shmem_inode_acct_block()
205 percpu_counter_add(&sbinfo->used_blocks, pages); in shmem_inode_acct_block()
218 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_unacct_blocks() local
220 if (sbinfo->max_blocks) in shmem_inode_unacct_blocks()
221 percpu_counter_sub(&sbinfo->used_blocks, pages); in shmem_inode_unacct_blocks()
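
The fragments above are the per-superblock block accounting pair: allocations are charged against the size= limit through a percpu counter, and freed pages are credited back. A minimal sketch of how the listed lines fit together; the separate VM accounting (shmem_acct_block()/shmem_unacct_blocks() on the inode flags) and the exact error unwinding are omitted here and belong to the real functions.

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/* max_blocks == 0 means the mount is unlimited: skip the counter */
	if (sbinfo->max_blocks) {
		/* would charging 'pages' push used_blocks past max_blocks? */
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			return false;		/* size= limit would be exceeded */
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}
	return true;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
}
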
244 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_reserve_inode() local
245 if (sbinfo->max_inodes) { in shmem_reserve_inode()
246 spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
247 if (!sbinfo->free_inodes) { in shmem_reserve_inode()
248 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
251 sbinfo->free_inodes--; in shmem_reserve_inode()
252 spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
259 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_free_inode() local
260 if (sbinfo->max_inodes) { in shmem_free_inode()
261 spin_lock(&sbinfo->stat_lock); in shmem_free_inode()
262 sbinfo->free_inodes++; in shmem_free_inode()
263 spin_unlock(&sbinfo->stat_lock); in shmem_free_inode()
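
The inode budget is simpler than the block budget: free_inodes is a plain count protected by stat_lock. A sketch of the reserve/free pair reconstructed from the lines above, assuming the conventional -ENOSPC return when the nr_inodes= pool is exhausted.

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {		/* 0 means unlimited inodes */
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;		/* nr_inodes= limit reached */
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
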
441 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, in shmem_unused_huge_shrink() argument
452 if (list_empty(&sbinfo->shrinklist)) in shmem_unused_huge_shrink()
455 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
456 list_for_each_safe(pos, next, &sbinfo->shrinklist) { in shmem_unused_huge_shrink()
482 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
539 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
540 list_splice_tail(&list, &sbinfo->shrinklist); in shmem_unused_huge_shrink()
541 sbinfo->shrinklist_len -= removed; in shmem_unused_huge_shrink()
542 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
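
shmem_unused_huge_shrink() walks the superblock's shrinklist of inodes that may hold a huge page straddling EOF. The listed lines only show its locking shape, so the following is just a skeleton of that shape: detach candidates under shrinklist_lock, do the heavy page-splitting work unlocked, then splice the leftovers back and adjust shrinklist_len. The loop body, the split bookkeeping and the return value are elided or assumed.

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
					      struct shrink_control *sc,
					      unsigned long nr_to_split)
{
	LIST_HEAD(list);
	struct list_head *pos, *next;
	unsigned long removed = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		/* move entries to the private 'list', dropping inodes that
		 * are going away and counting them in 'removed' (elided) */
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	/* split huge pages beyond i_size without holding the spinlock (elided) */

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return removed;		/* the real function reports split pages */
}
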
550 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_unused_huge_scan() local
552 if (!READ_ONCE(sbinfo->shrinklist_len)) in shmem_unused_huge_scan()
555 return shmem_unused_huge_shrink(sbinfo, sc, 0); in shmem_unused_huge_scan()
561 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_unused_huge_count() local
562 return READ_ONCE(sbinfo->shrinklist_len); in shmem_unused_huge_count()
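
These two are thin wrappers used as the superblock shrinker callbacks: count reports shrinklist_len, scan hands the real work to shmem_unused_huge_shrink(). shrinklist_len is read with READ_ONCE() because it is updated under shrinklist_lock elsewhere while being sampled locklessly here. A sketch, with the long return type assumed:

static long shmem_unused_huge_scan(struct super_block *sb,
				   struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
				    struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	return READ_ONCE(sbinfo->shrinklist_len);
}
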
568 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, in shmem_unused_huge_shrink() argument
575 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo) in is_huge_enabled() argument
578 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) && in is_huge_enabled()
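
Line 578 is the middle clause of a three-part test: the global shmem_huge sysfs knob can force or deny huge pages outright, otherwise the per-mount sbinfo->huge setting decides. A sketch of the whole predicate; the surrounding clauses are assumptions based on that reading.

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}
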
1020 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_setattr() local
1062 spin_lock(&sbinfo->shrinklist_lock); in shmem_setattr()
1069 &sbinfo->shrinklist); in shmem_setattr()
1070 sbinfo->shrinklist_len++; in shmem_setattr()
1072 spin_unlock(&sbinfo->shrinklist_lock); in shmem_setattr()
1086 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_evict_inode() local
1093 spin_lock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
1096 sbinfo->shrinklist_len--; in shmem_evict_inode()
1098 spin_unlock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
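
Truncation in shmem_setattr() can leave a huge page sticking out past the new i_size, so the inode is queued on the superblock's shrinklist for later splitting; shmem_evict_inode() takes a dying inode back off that list. A sketch of the two excerpts, with info standing for the inode's shmem_inode_info and the empty-check details assumed:

	/* shmem_setattr(): queue the inode for possible huge-page splitting */
	spin_lock(&sbinfo->shrinklist_lock);
	if (list_empty_careful(&info->shrinklist)) {
		list_add_tail(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	/* shmem_evict_inode(): make sure the inode is off the list */
	if (!list_empty(&info->shrinklist)) {
		spin_lock(&sbinfo->shrinklist_lock);
		if (!list_empty(&info->shrinklist)) {
			list_del_init(&info->shrinklist);
			sbinfo->shrinklist_len--;
		}
		spin_unlock(&sbinfo->shrinklist_lock);
	}
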
1396 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) in shmem_get_sbmpol() argument
1399 if (sbinfo->mpol) { in shmem_get_sbmpol()
1400 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ in shmem_get_sbmpol()
1401 mpol = sbinfo->mpol; in shmem_get_sbmpol()
1403 spin_unlock(&sbinfo->stat_lock); in shmem_get_sbmpol()
1411 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) in shmem_get_sbmpol() argument
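
The superblock mempolicy is handed out under stat_lock so a concurrent remount cannot swap sbinfo->mpol out from under the reader; the reference taken while locked keeps it alive afterwards. Line 1411 is the !CONFIG_NUMA stub that simply returns NULL. A sketch of the NUMA version; the mpol_get() call is not in the listing and is assumed.

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;

	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);			/* hold a reference while locked */
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
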
1628 struct shmem_sb_info *sbinfo; in shmem_getpage_gfp() local
1677 sbinfo = SHMEM_SB(inode->i_sb); in shmem_getpage_gfp()
1769 switch (sbinfo->huge) { in shmem_getpage_gfp()
1806 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); in shmem_getpage_gfp()
1857 spin_lock(&sbinfo->shrinklist_lock); in shmem_getpage_gfp()
1864 &sbinfo->shrinklist); in shmem_getpage_gfp()
1865 sbinfo->shrinklist_len++; in shmem_getpage_gfp()
1867 spin_unlock(&sbinfo->shrinklist_lock); in shmem_getpage_gfp()
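
Within shmem_getpage_gfp(), sbinfo drives two things: the switch on sbinfo->huge at 1769 picks huge versus normal allocation, and the shrinklist lines at 1857-1867 queue the inode when a freshly allocated huge page extends past i_size (the same pattern as in shmem_setattr() above). The call at 1806 is the interesting one: on an -ENOSPC allocation failure it tries to free space by splitting one unused huge page and retries. A rough excerpt of that retry, with the retry counter and error plumbing assumed:

	/* allocation returned -ENOSPC: reclaim by splitting one huge page
	 * beyond i_size elsewhere on this mount, then try again */
	while (retry--) {
		ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
		if (ret == SHRINK_STOP)
			break;			/* nothing left to split */
		if (ret)
			goto alloc_nohuge;	/* freed something: retry */
	}
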
2181 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_get_inode() local
2213 shmem_get_sbmpol(sbinfo)); in shmem_get_inode()
2642 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_fallocate() local
2700 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { in shmem_fallocate()
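
The single test in shmem_fallocate() is a fast-fail check: if the requested range alone is larger than the whole size= budget it can never succeed, so it bails out before allocating anything. A sketch of that check, with the error path assumed:

	/* start/end are the requested range in pages */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}
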
2772 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); in shmem_statfs() local
2777 if (sbinfo->max_blocks) { in shmem_statfs()
2778 buf->f_blocks = sbinfo->max_blocks; in shmem_statfs()
2780 buf->f_bfree = sbinfo->max_blocks - in shmem_statfs()
2781 percpu_counter_sum(&sbinfo->used_blocks); in shmem_statfs()
2783 if (sbinfo->max_inodes) { in shmem_statfs()
2784 buf->f_files = sbinfo->max_inodes; in shmem_statfs()
2785 buf->f_ffree = sbinfo->free_inodes; in shmem_statfs()
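
shmem_statfs() reports the configured limits straight from sbinfo, using percpu_counter_sum() for an exact (not approximate) used-block figure, and leaves the fields at zero on unlimited mounts. A sketch; the f_type/f_bsize/f_namelen boilerplate and the f_bavail handling are assumptions.

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bfree = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
		buf->f_bavail = buf->f_bfree;	/* tmpfs has no reserved blocks */
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* unlimited mounts leave the corresponding fields zero */
	return 0;
}
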
3284 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, in shmem_parse_options() argument
3330 sbinfo->max_blocks = in shmem_parse_options()
3333 sbinfo->max_blocks = memparse(value, &rest); in shmem_parse_options()
3337 sbinfo->max_inodes = memparse(value, &rest); in shmem_parse_options()
3343 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; in shmem_parse_options()
3352 sbinfo->uid = make_kuid(current_user_ns(), uid); in shmem_parse_options()
3353 if (!uid_valid(sbinfo->uid)) in shmem_parse_options()
3361 sbinfo->gid = make_kgid(current_user_ns(), gid); in shmem_parse_options()
3362 if (!gid_valid(sbinfo->gid)) in shmem_parse_options()
3373 sbinfo->huge = huge; in shmem_parse_options()
3387 sbinfo->mpol = mpol; in shmem_parse_options()
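
shmem_parse_options() walks the comma-separated mount string and writes straight into the sbinfo it is given (either the one being built in fill_super or a scratch copy during remount). size= is a byte count converted to pages, nr_blocks=/nr_inodes= are raw counts, mode/uid/gid set the root ownership, and huge=/mpol= fill sbinfo->huge and sbinfo->mpol. The helper below is hypothetical, used only to isolate the assignments visible above; the real code does this inline in a strsep() loop with full error handling and %-of-RAM support for size=.

static int shmem_apply_option(struct shmem_sb_info *sbinfo,
			      const char *this_char, char *value)
{
	char *rest;

	if (!strcmp(this_char, "size")) {
		/* size= is bytes, stored as a page count */
		sbinfo->max_blocks =
			DIV_ROUND_UP(memparse(value, &rest), PAGE_SIZE);
	} else if (!strcmp(this_char, "nr_blocks")) {
		sbinfo->max_blocks = memparse(value, &rest);
	} else if (!strcmp(this_char, "nr_inodes")) {
		sbinfo->max_inodes = memparse(value, &rest);
	} else if (!strcmp(this_char, "mode")) {
		sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
	} else if (!strcmp(this_char, "uid")) {
		sbinfo->uid = make_kuid(current_user_ns(),
					simple_strtoul(value, &rest, 0));
		if (!uid_valid(sbinfo->uid))
			return -EINVAL;
	} else if (!strcmp(this_char, "gid")) {
		sbinfo->gid = make_kgid(current_user_ns(),
					simple_strtoul(value, &rest, 0));
		if (!gid_valid(sbinfo->gid))
			return -EINVAL;
	}
	/* huge= (3373) and mpol= (3387) parse their strings into
	 * sbinfo->huge and sbinfo->mpol; omitted here */
	return 0;
}
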
3401 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_remount_fs() local
3402 struct shmem_sb_info config = *sbinfo; in shmem_remount_fs()
3410 spin_lock(&sbinfo->stat_lock); in shmem_remount_fs()
3411 inodes = sbinfo->max_inodes - sbinfo->free_inodes; in shmem_remount_fs()
3412 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) in shmem_remount_fs()
3421 if (config.max_blocks && !sbinfo->max_blocks) in shmem_remount_fs()
3423 if (config.max_inodes && !sbinfo->max_inodes) in shmem_remount_fs()
3427 sbinfo->huge = config.huge; in shmem_remount_fs()
3428 sbinfo->max_blocks = config.max_blocks; in shmem_remount_fs()
3429 sbinfo->max_inodes = config.max_inodes; in shmem_remount_fs()
3430 sbinfo->free_inodes = config.max_inodes - inodes; in shmem_remount_fs()
3436 mpol_put(sbinfo->mpol); in shmem_remount_fs()
3437 sbinfo->mpol = config.mpol; /* transfers initial ref */ in shmem_remount_fs()
3440 spin_unlock(&sbinfo->stat_lock); in shmem_remount_fs()
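
Remount parses the new options into a stack copy of the sbinfo first, then applies them under stat_lock only if the superblock can still honour them: shrinking below what is already allocated is rejected, as is switching an in-use mount between limited and unlimited (there is no usable record of consumption for an unlimited mount). The mempolicy is only replaced if mpol= was given again. A sketch along those lines; the remount flag passed to the parser and the exact error labels are assumptions.

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	config.mpol = NULL;
	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;			/* already over the new size= */
	if (config.max_inodes < inodes)
		goto out;			/* already over the new nr_inodes= */
	/* refuse to go from unlimited to limited: usage was never tracked */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->huge = config.huge;
	sbinfo->max_blocks = config.max_blocks;
	sbinfo->max_inodes = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	/* keep the previous mempolicy unless mpol= was specified again */
	if (config.mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = config.mpol;	/* transfers initial ref */
	}
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
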
3446 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); in shmem_show_options() local
3448 if (sbinfo->max_blocks != shmem_default_max_blocks()) in shmem_show_options()
3450 sbinfo->max_blocks << (PAGE_SHIFT - 10)); in shmem_show_options()
3451 if (sbinfo->max_inodes != shmem_default_max_inodes()) in shmem_show_options()
3452 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); in shmem_show_options()
3453 if (sbinfo->mode != (0777 | S_ISVTX)) in shmem_show_options()
3454 seq_printf(seq, ",mode=%03ho", sbinfo->mode); in shmem_show_options()
3455 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) in shmem_show_options()
3457 from_kuid_munged(&init_user_ns, sbinfo->uid)); in shmem_show_options()
3458 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) in shmem_show_options()
3460 from_kgid_munged(&init_user_ns, sbinfo->gid)); in shmem_show_options()
3463 if (sbinfo->huge) in shmem_show_options()
3464 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); in shmem_show_options()
3466 shmem_show_mpol(seq, sbinfo->mpol); in shmem_show_options()
3474 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); in shmem_put_super() local
3476 percpu_counter_destroy(&sbinfo->used_blocks); in shmem_put_super()
3477 mpol_put(sbinfo->mpol); in shmem_put_super()
3478 kfree(sbinfo); in shmem_put_super()
3485 struct shmem_sb_info *sbinfo; in shmem_fill_super() local
3489 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), in shmem_fill_super()
3491 if (!sbinfo) in shmem_fill_super()
3494 sbinfo->mode = 0777 | S_ISVTX; in shmem_fill_super()
3495 sbinfo->uid = current_fsuid(); in shmem_fill_super()
3496 sbinfo->gid = current_fsgid(); in shmem_fill_super()
3497 sb->s_fs_info = sbinfo; in shmem_fill_super()
3506 sbinfo->max_blocks = shmem_default_max_blocks(); in shmem_fill_super()
3507 sbinfo->max_inodes = shmem_default_max_inodes(); in shmem_fill_super()
3508 if (shmem_parse_options(data, sbinfo, false)) { in shmem_fill_super()
3521 spin_lock_init(&sbinfo->stat_lock); in shmem_fill_super()
3522 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) in shmem_fill_super()
3524 sbinfo->free_inodes = sbinfo->max_inodes; in shmem_fill_super()
3525 spin_lock_init(&sbinfo->shrinklist_lock); in shmem_fill_super()
3526 INIT_LIST_HEAD(&sbinfo->shrinklist); in shmem_fill_super()
3542 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); in shmem_fill_super()
3545 inode->i_uid = sbinfo->uid; in shmem_fill_super()
3546 inode->i_gid = sbinfo->gid; in shmem_fill_super()
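
shmem_fill_super() is where all of this state is created: the sbinfo is zero-allocated, ownership and limits are set from defaults plus the mount options, then the stat lock, the used_blocks percpu counter and the huge-page shrinklist are initialized before the root inode is created with the configured mode/uid/gid. A condensed sketch of that sequence; the cache-line padding of the allocation, the kernel-internal-mount special cases, the remaining super_block fields and the error bookkeeping are omitted or assumed.

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct shmem_sb_info *sbinfo;
	struct inode *inode;

	sbinfo = kzalloc(sizeof(struct shmem_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	/* defaults: world-writable sticky root, owned by the mounter */
	sbinfo->mode = 0777 | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

	/* default limits for user-visible tmpfs, then the mount options */
	sbinfo->max_blocks = shmem_default_max_blocks();
	sbinfo->max_inodes = shmem_default_max_inodes();
	if (shmem_parse_options(data, sbinfo, false))
		goto failed;

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;
	spin_lock_init(&sbinfo->shrinklist_lock);
	INIT_LIST_HEAD(&sbinfo->shrinklist);

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0,
				VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);	/* tears down the counter, mpol and sbinfo */
	return -ENOMEM;		/* the real code propagates the specific error */
}
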
3806 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_huge_enabled() local
3814 switch (sbinfo->huge) { in shmem_huge_enabled()
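
shmem_huge_enabled() answers whether a given mapping may use huge pages, applying the same precedence as is_huge_enabled(): the global shmem_huge knob can force or deny, and only then is the per-mount sbinfo->huge consulted. A simplified sketch of that final switch, with the within_size geometry test reduced to a comment and the surrounding function assumed:

	/* after shmem_huge force/deny have been handled */
	switch (sbinfo->huge) {
	case SHMEM_HUGE_NEVER:
		return false;
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		/* only if i_size actually spans at least one huge page at
		 * this mapping's offset; otherwise treat like advise (elided) */
		/* fall through */
	case SHMEM_HUGE_ADVISE:
		return (vma->vm_flags & VM_HUGEPAGE);
	default:
		return false;
	}
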