Lines Matching +full:not +full:- +full:swapped
6 * 2000-2001 Christoph Rohland
7 * 2000-2001 SAP AG
9 * Copyright (C) 2002-2011 Hugh Dickins.
11 * Copyright (C) 2002-2005 VERITAS Software Corporation.
18 * tiny-shmem:
61 #include <linux/backing-dev.h>
101 * inode->i_private (with i_rwsem making sure that it has only one user at
102 * a time): we would prefer not to enlarge the shmem inode just for that.
143 return min3(nr_pages - totalhigh_pages(), nr_pages / 2, in shmem_default_max_inodes()
155 return sb->s_fs_info; in SHMEM_SB()
159 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
162 * consistent with the pre-accounting of private mappings ...
167 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size)); in shmem_acct_size()
181 return security_vm_enough_memory_mm(current->mm, in shmem_reacct_size()
182 VM_ACCT(newsize) - VM_ACCT(oldsize)); in shmem_reacct_size()
184 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize)); in shmem_reacct_size()
192 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
193 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
200 return security_vm_enough_memory_mm(current->mm, in shmem_acct_block()
213 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_acct_block()
214 int err = -ENOSPC; in shmem_inode_acct_block()
216 if (shmem_acct_block(info->flags, pages)) in shmem_inode_acct_block()
220 if (sbinfo->max_blocks) { in shmem_inode_acct_block()
221 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_inode_acct_block()
222 sbinfo->max_blocks - pages) > 0) in shmem_inode_acct_block()
229 percpu_counter_add(&sbinfo->used_blocks, pages); in shmem_inode_acct_block()
239 shmem_unacct_blocks(info->flags, pages); in shmem_inode_acct_block()
246 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_inode_unacct_blocks()
251 if (sbinfo->max_blocks) in shmem_inode_unacct_blocks()
252 percpu_counter_sub(&sbinfo->used_blocks, pages); in shmem_inode_unacct_blocks()
253 shmem_unacct_blocks(info->flags, pages); in shmem_inode_unacct_blocks()
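The shmem_inode_acct_block()/shmem_inode_unacct_blocks() lines above charge pages against the mount's block limit (when max_blocks is set) and credit them back on release; the comparison against max_blocks - pages is what turns an over-limit charge into -ENOSPC. A minimal single-threaded sketch of that charge/uncharge pair, using a plain counter where the kernel uses a percpu_counter (struct toy_sb and the helper names are illustrative only, not kernel API):

#include <stdio.h>
#include <errno.h>

/* Toy per-mount state: max_blocks == 0 means "no limit", as in tmpfs. */
struct toy_sb {
	long max_blocks;
	long used_blocks;
};

/* Charge @pages blocks, failing with -ENOSPC if the mount would overflow. */
static int acct_blocks(struct toy_sb *sb, long pages)
{
	if (sb->max_blocks && sb->used_blocks + pages > sb->max_blocks)
		return -ENOSPC;
	sb->used_blocks += pages;
	return 0;
}

/* Credit @pages blocks back, e.g. on truncate or inode eviction. */
static void unacct_blocks(struct toy_sb *sb, long pages)
{
	sb->used_blocks -= pages;
}

int main(void)
{
	struct toy_sb sb = { .max_blocks = 4, .used_blocks = 0 };

	printf("charge 3 -> %d\n", acct_blocks(&sb, 3));	/* prints 0 */
	printf("charge 2 -> %d\n", acct_blocks(&sb, 2));	/* prints -28 (-ENOSPC) */
	unacct_blocks(&sb, 3);
	printf("charge 2 -> %d\n", acct_blocks(&sb, 2));	/* prints 0 */
	return 0;
}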
268 return vma->vm_ops == &shmem_anon_vm_ops; in vma_is_anon_shmem()
273 return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops; in vma_is_shmem()
286 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; in shmem_enable_quotas()
301 for (type--; type >= 0; type--) in shmem_enable_quotas()
316 return SHMEM_I(inode)->i_dquot; in shmem_get_dquots()
335 if (!(sb->s_flags & SB_KERNMOUNT)) { in shmem_reserve_inode()
336 raw_spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
337 if (sbinfo->max_inodes) { in shmem_reserve_inode()
338 if (sbinfo->free_ispace < BOGO_INODE_SIZE) { in shmem_reserve_inode()
339 raw_spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
340 return -ENOSPC; in shmem_reserve_inode()
342 sbinfo->free_ispace -= BOGO_INODE_SIZE; in shmem_reserve_inode()
345 ino = sbinfo->next_ino++; in shmem_reserve_inode()
347 ino = sbinfo->next_ino++; in shmem_reserve_inode()
348 if (unlikely(!sbinfo->full_inums && in shmem_reserve_inode()
356 __func__, MINOR(sb->s_dev)); in shmem_reserve_inode()
357 sbinfo->next_ino = 1; in shmem_reserve_inode()
358 ino = sbinfo->next_ino++; in shmem_reserve_inode()
362 raw_spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
365 * __shmem_file_setup, one of our callers, is lock-free: it in shmem_reserve_inode()
368 * unknown contexts. As such, use a per-cpu batched allocator in shmem_reserve_inode()
369 * which doesn't require the per-sb stat_lock unless we are at in shmem_reserve_inode()
373 * shmem mounts are not exposed to userspace, so we don't need in shmem_reserve_inode()
378 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu()); in shmem_reserve_inode()
381 raw_spin_lock(&sbinfo->stat_lock); in shmem_reserve_inode()
382 ino = sbinfo->next_ino; in shmem_reserve_inode()
383 sbinfo->next_ino += SHMEM_INO_BATCH; in shmem_reserve_inode()
384 raw_spin_unlock(&sbinfo->stat_lock); in shmem_reserve_inode()
399 if (sbinfo->max_inodes) { in shmem_free_inode()
400 raw_spin_lock(&sbinfo->stat_lock); in shmem_free_inode()
401 sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace; in shmem_free_inode()
402 raw_spin_unlock(&sbinfo->stat_lock); in shmem_free_inode()
407 * shmem_recalc_inode - recalculate the block usage of an inode
410 * @swapped: the change in number of pages swapped from inode
415 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
416 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
418 static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped) in shmem_recalc_inode() argument
423 spin_lock(&info->lock); in shmem_recalc_inode()
424 info->alloced += alloced; in shmem_recalc_inode()
425 info->swapped += swapped; in shmem_recalc_inode()
426 freed = info->alloced - info->swapped - in shmem_recalc_inode()
427 READ_ONCE(inode->i_mapping->nrpages); in shmem_recalc_inode()
430 * after i_mapping->nrpages has already been adjusted (up or down), in shmem_recalc_inode()
431 * shmem_writepage() has to raise swapped before nrpages is lowered - in shmem_recalc_inode()
435 if (swapped > 0) in shmem_recalc_inode()
436 freed += swapped; in shmem_recalc_inode()
438 info->alloced -= freed; in shmem_recalc_inode()
439 spin_unlock(&info->lock); in shmem_recalc_inode()
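shmem_recalc_inode() above relies on the invariant stated in its comment: info->alloced normally equals i_mapping->nrpages plus info->swapped, so any surplus of alloced over (nrpages + swapped) is pages freed behind the inode's back that must be un-charged. A small userspace sketch of just that arithmetic, with plain longs standing in for the kernel structures (names here are illustrative, not kernel API):

#include <stdio.h>

/* Stand-ins for the three counters the real code reconciles. */
struct shmem_acct {
	long alloced;	/* pages charged to the inode */
	long swapped;	/* pages currently swapped out */
	long nrpages;	/* pages still in the page cache */
};

/*
 * Model of the shmem_recalc_inode() arithmetic: apply the two deltas,
 * then treat any surplus of alloced over (nrpages + swapped) as pages
 * freed behind the inode's back.  Returns the pages to un-account.
 */
static long recalc(struct shmem_acct *a, long d_alloced, long d_swapped)
{
	long freed;

	a->alloced += d_alloced;
	a->swapped += d_swapped;
	freed = a->alloced - a->swapped - a->nrpages;
	/*
	 * A positive swapped delta (the writeback path) arrives while the
	 * folio is still counted in nrpages, so compensate to keep freed
	 * from going stale, as the kernel comment above describes.
	 */
	if (d_swapped > 0)
		freed += d_swapped;
	if (freed > 0)
		a->alloced -= freed;
	return freed > 0 ? freed : 0;
}

int main(void)
{
	/* 8 pages charged, 2 swapped out, only 5 left in the cache. */
	struct shmem_acct a = { .alloced = 8, .swapped = 2, .nrpages = 5 };

	printf("freed %ld, alloced now %ld\n", recalc(&a, 0, 0), a.alloced);
	/* prints: freed 1, alloced now 7 */
	return 0;
}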
448 struct address_space *mapping = inode->i_mapping; in shmem_charge()
454 xa_lock_irq(&mapping->i_pages); in shmem_charge()
455 mapping->nrpages += pages; in shmem_charge()
456 xa_unlock_irq(&mapping->i_pages); in shmem_charge()
476 XA_STATE(xas, &mapping->i_pages, index); in shmem_replace_entry()
483 return -ENOENT; in shmem_replace_entry()
490 * that an entry was not already brought back from swap by a racing thread.
492 * Checking page is not enough: by the time a SwapCache page is locked, it
498 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); in shmem_confirm_swap()
530 #define SHMEM_HUGE_DENY (-1)
531 #define SHMEM_HUGE_FORCE (-2)
534 /* ifdef here to avoid bloating shmem.o when not necessary */
543 if (!S_ISREG(inode->i_mode)) in shmem_is_huge()
545 if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags))) in shmem_is_huge()
552 switch (SHMEM_SB(inode->i_sb)->huge) { in shmem_is_huge()
585 return -EINVAL; in shmem_parse_huge()
620 unsigned long batch = sc ? sc->nr_to_scan : 128; in shmem_unused_huge_shrink()
623 if (list_empty(&sbinfo->shrinklist)) in shmem_unused_huge_shrink()
626 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
627 list_for_each_safe(pos, next, &sbinfo->shrinklist) { in shmem_unused_huge_shrink()
631 inode = igrab(&info->vfs_inode); in shmem_unused_huge_shrink()
635 list_del_init(&info->shrinklist); in shmem_unused_huge_shrink()
640 if (round_up(inode->i_size, PAGE_SIZE) == in shmem_unused_huge_shrink()
641 round_up(inode->i_size, HPAGE_PMD_SIZE)) { in shmem_unused_huge_shrink()
642 list_move(&info->shrinklist, &to_remove); in shmem_unused_huge_shrink()
646 list_move(&info->shrinklist, &list); in shmem_unused_huge_shrink()
648 sbinfo->shrinklist_len--; in shmem_unused_huge_shrink()
649 if (!--batch) in shmem_unused_huge_shrink()
652 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
656 inode = &info->vfs_inode; in shmem_unused_huge_shrink()
657 list_del_init(&info->shrinklist); in shmem_unused_huge_shrink()
666 inode = &info->vfs_inode; in shmem_unused_huge_shrink()
671 index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT; in shmem_unused_huge_shrink()
672 folio = filemap_get_folio(inode->i_mapping, index); in shmem_unused_huge_shrink()
704 list_del_init(&info->shrinklist); in shmem_unused_huge_shrink()
713 spin_lock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
714 list_move(&info->shrinklist, &sbinfo->shrinklist); in shmem_unused_huge_shrink()
715 sbinfo->shrinklist_len++; in shmem_unused_huge_shrink()
716 spin_unlock(&sbinfo->shrinklist_lock); in shmem_unused_huge_shrink()
729 if (!READ_ONCE(sbinfo->shrinklist_len)) in shmem_unused_huge_scan()
739 return READ_ONCE(sbinfo->shrinklist_len); in shmem_unused_huge_count()
766 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); in shmem_add_to_page_cache()
776 folio->mapping = mapping; in shmem_add_to_page_cache()
777 folio->index = index; in shmem_add_to_page_cache()
794 xas_set_err(&xas, -EEXIST); in shmem_add_to_page_cache()
798 xas_set_err(&xas, -EEXIST); in shmem_add_to_page_cache()
808 mapping->nrpages += nr; in shmem_add_to_page_cache()
822 folio->mapping = NULL; in shmem_add_to_page_cache()
832 struct address_space *mapping = folio->mapping; in shmem_delete_from_page_cache()
836 xa_lock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
837 error = shmem_replace_entry(mapping, folio->index, folio, radswap); in shmem_delete_from_page_cache()
838 folio->mapping = NULL; in shmem_delete_from_page_cache()
839 mapping->nrpages -= nr; in shmem_delete_from_page_cache()
840 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); in shmem_delete_from_page_cache()
841 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); in shmem_delete_from_page_cache()
842 xa_unlock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
855 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); in shmem_free_swap()
857 return -ENOENT; in shmem_free_swap()
864 * given offsets are swapped out.
867 * as long as the inode doesn't go away and racy results are not a problem.
872 XA_STATE(xas, &mapping->i_pages, start); in shmem_partial_swap_usage()
874 unsigned long swapped = 0; in shmem_partial_swap_usage() local
875 unsigned long max = end - 1; in shmem_partial_swap_usage()
882 swapped++; in shmem_partial_swap_usage()
893 return swapped << PAGE_SHIFT; in shmem_partial_swap_usage()
898 * given vma is swapped out.
901 * as long as the inode doesn't go away and racy results are not a problem.
905 struct inode *inode = file_inode(vma->vm_file); in shmem_swap_usage()
907 struct address_space *mapping = inode->i_mapping; in shmem_swap_usage()
908 unsigned long swapped; in shmem_swap_usage() local
910 /* Be careful as we don't hold info->lock */ in shmem_swap_usage()
911 swapped = READ_ONCE(info->swapped); in shmem_swap_usage()
918 if (!swapped) in shmem_swap_usage()
921 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size) in shmem_swap_usage()
922 return swapped << PAGE_SHIFT; in shmem_swap_usage()
925 return shmem_partial_swap_usage(mapping, vma->vm_pgoff, in shmem_swap_usage()
926 vma->vm_pgoff + vma_pages(vma)); in shmem_swap_usage()
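The shmem_swap_usage()/shmem_partial_swap_usage() lines above show the two ways a mapping's swap usage is reported: if the vma maps the whole file from offset 0, the per-inode swapped counter (in pages) is simply converted to bytes with << PAGE_SHIFT; otherwise only the mapped index range is scanned. A toy model of that decision (all types and names invented for the example; the real scan walks the xarray for swap entries):

#include <stdio.h>
#include <stdbool.h>

#define TOY_PAGE_SHIFT	12
#define TOY_PAGE_SIZE	(1UL << TOY_PAGE_SHIFT)

struct toy_inode {
	unsigned long size_bytes;	/* i_size */
	unsigned long swapped;		/* info->swapped, in pages */
	const bool *swapped_at;		/* per-index "is this page swapped?" */
	unsigned long npages;
};

struct toy_vma {
	unsigned long pgoff;		/* vm_pgoff */
	unsigned long len_bytes;	/* vm_end - vm_start */
};

/* Slow path: count swapped pages in [start, end), like the partial scan. */
static unsigned long partial_swap_usage(const struct toy_inode *inode,
					unsigned long start, unsigned long end)
{
	unsigned long i, swapped = 0;

	for (i = start; i < end && i < inode->npages; i++)
		if (inode->swapped_at[i])
			swapped++;
	return swapped << TOY_PAGE_SHIFT;
}

/* Fast-path decision: whole file mapped => just convert the counter. */
static unsigned long swap_usage(const struct toy_inode *inode,
				const struct toy_vma *vma)
{
	if (!inode->swapped)
		return 0;
	if (!vma->pgoff && vma->len_bytes >= inode->size_bytes)
		return inode->swapped << TOY_PAGE_SHIFT;
	return partial_swap_usage(inode, vma->pgoff,
				  vma->pgoff + (vma->len_bytes >> TOY_PAGE_SHIFT));
}

int main(void)
{
	bool swapped_at[4] = { false, true, true, false };
	struct toy_inode inode = {
		.size_bytes = 4 * TOY_PAGE_SIZE, .swapped = 2,
		.swapped_at = swapped_at, .npages = 4,
	};
	struct toy_vma whole = { .pgoff = 0, .len_bytes = 4 * TOY_PAGE_SIZE };
	struct toy_vma tail  = { .pgoff = 2, .len_bytes = 2 * TOY_PAGE_SIZE };

	printf("whole mapping: %lu bytes swapped\n", swap_usage(&inode, &whole));
	printf("tail mapping:  %lu bytes swapped\n", swap_usage(&inode, &tail));
	return 0;
}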
957 folio = filemap_get_entry(inode->i_mapping, index); in shmem_get_partial_folio()
962 if (folio->mapping == inode->i_mapping) in shmem_get_partial_folio()
964 /* The folio has been swapped out */ in shmem_get_partial_folio()
984 struct address_space *mapping = inode->i_mapping; in shmem_undo_range()
986 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; in shmem_undo_range()
996 if (lend == -1) in shmem_undo_range()
997 end = -1; /* unsigned, so actually very big */ in shmem_undo_range()
999 if (info->fallocend > start && info->fallocend <= end && !unfalloc) in shmem_undo_range()
1000 info->fallocend = start; in shmem_undo_range()
1004 while (index < end && find_lock_entries(mapping, &index, end - 1, in shmem_undo_range()
1043 end = folio->index; in shmem_undo_range()
1055 end = folio->index; in shmem_undo_range()
1066 if (!find_get_entries(mapping, &index, end - 1, &fbatch, in shmem_undo_range()
1068 /* If all gone or hole-punch or unfalloc, we're done */ in shmem_undo_range()
1069 if (index == start || end != -1) in shmem_undo_range()
1109 shmem_recalc_inode(inode, 0, -nr_swaps_freed); in shmem_undo_range()
1115 inode->i_mtime = inode_set_ctime_current(inode); in shmem_truncate_range()
1124 struct inode *inode = path->dentry->d_inode; in shmem_getattr()
1127 if (info->alloced - info->swapped != inode->i_mapping->nrpages) in shmem_getattr()
1130 if (info->fsflags & FS_APPEND_FL) in shmem_getattr()
1131 stat->attributes |= STATX_ATTR_APPEND; in shmem_getattr()
1132 if (info->fsflags & FS_IMMUTABLE_FL) in shmem_getattr()
1133 stat->attributes |= STATX_ATTR_IMMUTABLE; in shmem_getattr()
1134 if (info->fsflags & FS_NODUMP_FL) in shmem_getattr()
1135 stat->attributes |= STATX_ATTR_NODUMP; in shmem_getattr()
1136 stat->attributes_mask |= (STATX_ATTR_APPEND | in shmem_getattr()
1142 stat->blksize = HPAGE_PMD_SIZE; in shmem_getattr()
1145 stat->result_mask |= STATX_BTIME; in shmem_getattr()
1146 stat->btime.tv_sec = info->i_crtime.tv_sec; in shmem_getattr()
1147 stat->btime.tv_nsec = info->i_crtime.tv_nsec; in shmem_getattr()
1166 if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) { in shmem_setattr()
1167 if ((inode->i_mode ^ attr->ia_mode) & 0111) { in shmem_setattr()
1168 return -EPERM; in shmem_setattr()
1172 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { in shmem_setattr()
1173 loff_t oldsize = inode->i_size; in shmem_setattr()
1174 loff_t newsize = attr->ia_size; in shmem_setattr()
1177 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || in shmem_setattr()
1178 (newsize > oldsize && (info->seals & F_SEAL_GROW))) in shmem_setattr()
1179 return -EPERM; in shmem_setattr()
1182 error = shmem_reacct_size(SHMEM_I(inode)->flags, in shmem_setattr()
1194 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
1196 if (info->alloced) in shmem_setattr()
1198 newsize, (loff_t)-1); in shmem_setattr()
1201 unmap_mapping_range(inode->i_mapping, in shmem_setattr()
1222 if (attr->ia_valid & ATTR_MODE) in shmem_setattr()
1223 error = posix_acl_chmod(idmap, dentry, inode->i_mode); in shmem_setattr()
1227 inode->i_mtime = inode_get_ctime(inode); in shmem_setattr()
1236 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_evict_inode()
1239 if (shmem_mapping(inode->i_mapping)) { in shmem_evict_inode()
1240 shmem_unacct_size(info->flags, inode->i_size); in shmem_evict_inode()
1241 inode->i_size = 0; in shmem_evict_inode()
1242 mapping_set_exiting(inode->i_mapping); in shmem_evict_inode()
1243 shmem_truncate_range(inode, 0, (loff_t)-1); in shmem_evict_inode()
1244 if (!list_empty(&info->shrinklist)) { in shmem_evict_inode()
1245 spin_lock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
1246 if (!list_empty(&info->shrinklist)) { in shmem_evict_inode()
1247 list_del_init(&info->shrinklist); in shmem_evict_inode()
1248 sbinfo->shrinklist_len--; in shmem_evict_inode()
1250 spin_unlock(&sbinfo->shrinklist_lock); in shmem_evict_inode()
1252 while (!list_empty(&info->swaplist)) { in shmem_evict_inode()
1254 wait_var_event(&info->stop_eviction, in shmem_evict_inode()
1255 !atomic_read(&info->stop_eviction)); in shmem_evict_inode()
1258 if (!atomic_read(&info->stop_eviction)) in shmem_evict_inode()
1259 list_del_init(&info->swaplist); in shmem_evict_inode()
1264 simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL); in shmem_evict_inode()
1265 shmem_free_inode(inode->i_sb, freed); in shmem_evict_inode()
1266 WARN_ON(inode->i_blocks); in shmem_evict_inode()
1278 XA_STATE(xas, &mapping->i_pages, start); in shmem_find_swap_entries()
1313 * Move the swapped pages for an inode to page cache. Returns the count
1314 * of pages swapped in, or the error in case of failure.
1322 struct address_space *mapping = inode->i_mapping; in shmem_unuse_swap_entries()
1325 struct folio *folio = fbatch->folios[i]; in shmem_unuse_swap_entries()
1338 if (error == -ENOMEM) in shmem_unuse_swap_entries()
1350 struct address_space *mapping = inode->i_mapping; in shmem_unuse_inode()
1368 start = indices[folio_batch_count(&fbatch) - 1]; in shmem_unuse_inode()
1389 if (!info->swapped) { in shmem_unuse()
1390 list_del_init(&info->swaplist); in shmem_unuse()
1395 * but before doing so, make sure shmem_evict_inode() will not in shmem_unuse()
1397 * (igrab() would protect from unlink, but not from unmount). in shmem_unuse()
1399 atomic_inc(&info->stop_eviction); in shmem_unuse()
1402 error = shmem_unuse_inode(&info->vfs_inode, type); in shmem_unuse()
1407 if (!info->swapped) in shmem_unuse()
1408 list_del_init(&info->swaplist); in shmem_unuse()
1409 if (atomic_dec_and_test(&info->stop_eviction)) in shmem_unuse()
1410 wake_up_var(&info->stop_eviction); in shmem_unuse()
1425 struct address_space *mapping = folio->mapping; in shmem_writepage()
1426 struct inode *inode = mapping->host; in shmem_writepage()
1428 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_writepage()
1434 * shmem_writepage; but a stacking filesystem might use ->writepage of in shmem_writepage()
1436 * swap only in response to memory pressure, and not for the writeback in shmem_writepage()
1439 if (WARN_ON_ONCE(!wbc->for_reclaim)) in shmem_writepage()
1442 if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap)) in shmem_writepage()
1462 index = folio->index; in shmem_writepage()
1470 * not yet completed the fallocation, then (a) we want to keep track in shmem_writepage()
1471 * of this folio in case we have to undo it, and (b) it may not be a in shmem_writepage()
1476 if (inode->i_private) { in shmem_writepage()
1478 spin_lock(&inode->i_lock); in shmem_writepage()
1479 shmem_falloc = inode->i_private; in shmem_writepage()
1481 !shmem_falloc->waitq && in shmem_writepage()
1482 index >= shmem_falloc->start && in shmem_writepage()
1483 index < shmem_falloc->next) in shmem_writepage()
1484 shmem_falloc->nr_unswapped++; in shmem_writepage()
1487 spin_unlock(&inode->i_lock); in shmem_writepage()
1501 * Add inode to shmem_unuse()'s list of swapped-out inodes, in shmem_writepage()
1502 * if it's not already there. Do it now before the folio is in shmem_writepage()
1505 * we've incremented swapped, because shmem_unuse_inode() will in shmem_writepage()
1506 * prune a !swapped inode from the swaplist under this mutex. in shmem_writepage()
1509 if (list_empty(&info->swaplist)) in shmem_writepage()
1510 list_add(&info->swaplist, &shmem_swaplist); in shmem_writepage()
1521 swap_writepage(&folio->page, wbc); in shmem_writepage()
1529 if (wbc->for_reclaim) in shmem_writepage()
1540 if (!mpol || mpol->mode == MPOL_DEFAULT) in shmem_show_mpol()
1551 if (sbinfo->mpol) { in shmem_get_sbmpol()
1552 raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ in shmem_get_sbmpol()
1553 mpol = sbinfo->mpol; in shmem_get_sbmpol()
1555 raw_spin_unlock(&sbinfo->stat_lock); in shmem_get_sbmpol()
1578 vma->vm_pgoff = index + info->vfs_inode.i_ino; in shmem_pseudo_vma_init()
1579 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); in shmem_pseudo_vma_init()
1585 mpol_cond_put(vma->vm_policy); in shmem_pseudo_vma_destroy()
1634 struct address_space *mapping = info->vfs_inode.i_mapping; in shmem_alloc_hugefolio()
1639 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, in shmem_alloc_hugefolio()
1690 err = -ENOMEM; in shmem_alloc_and_acct_folio()
1723 entry = old->swap; in shmem_replace_folio()
1735 return -ENOMEM; in shmem_replace_folio()
1744 new->swap = entry; in shmem_replace_folio()
1751 xa_lock_irq(&swap_mapping->i_pages); in shmem_replace_folio()
1757 __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1); in shmem_replace_folio()
1758 __lruvec_stat_mod_folio(old, NR_SHMEM, -1); in shmem_replace_folio()
1760 xa_unlock_irq(&swap_mapping->i_pages); in shmem_replace_folio()
1764 * Is this possible? I think not, now that our callers check in shmem_replace_folio()
1775 old->private = NULL; in shmem_replace_folio()
1785 struct address_space *mapping = inode->i_mapping; in shmem_set_folio_swapin_error()
1790 old = xa_cmpxchg_irq(&mapping->i_pages, index, in shmem_set_folio_swapin_error()
1799 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks in shmem_set_folio_swapin_error()
1803 shmem_recalc_inode(inode, -1, -1); in shmem_set_folio_swapin_error()
1809 * Caller has to make sure that *foliop contains a valid swapped folio.
1818 struct address_space *mapping = inode->i_mapping; in shmem_swapin_folio()
1820 struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; in shmem_swapin_folio()
1831 return -EIO; in shmem_swapin_folio()
1836 return -EEXIST; in shmem_swapin_folio()
1838 return -EINVAL; in shmem_swapin_folio()
1853 error = -ENOMEM; in shmem_swapin_folio()
1861 folio->swap.val != swap.val || in shmem_swapin_folio()
1863 error = -EEXIST; in shmem_swapin_folio()
1867 error = -EIO; in shmem_swapin_folio()
1890 shmem_recalc_inode(inode, 0, -1); in shmem_swapin_folio()
1904 error = -EEXIST; in shmem_swapin_folio()
1905 if (error == -EIO) in shmem_swapin_folio()
1918 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
1920 * If we allocate a new one we do not mark it dirty. That's up to the
1932 struct address_space *mapping = inode->i_mapping; in shmem_get_folio_gfp()
1944 return -EFBIG; in shmem_get_folio_gfp()
1948 return -EINVAL; in shmem_get_folio_gfp()
1951 sbinfo = SHMEM_SB(inode->i_sb); in shmem_get_folio_gfp()
1952 charge_mm = vma ? vma->vm_mm : NULL; in shmem_get_folio_gfp()
1965 if (error == -EEXIST) in shmem_get_folio_gfp()
1975 /* Has the folio been truncated or swapped out? */ in shmem_get_folio_gfp()
1976 if (unlikely(folio->mapping != mapping)) { in shmem_get_folio_gfp()
2000 return -ENOENT; in shmem_get_folio_gfp()
2003 * Fast cache lookup and swap lookup did not find it: allocate. in shmem_get_folio_gfp()
2012 vma ? vma->vm_mm : NULL, vma ? vma->vm_flags : 0)) in shmem_get_folio_gfp()
2027 if (error != -ENOSPC) in shmem_get_folio_gfp()
2033 while (retry--) { in shmem_get_folio_gfp()
2062 folio_next_index(folio) - 1) { in shmem_get_folio_gfp()
2067 spin_lock(&sbinfo->shrinklist_lock); in shmem_get_folio_gfp()
2070 * ->shrink_list in shmem_unused_huge_shrink() in shmem_get_folio_gfp()
2072 if (list_empty_careful(&info->shrinklist)) { in shmem_get_folio_gfp()
2073 list_add_tail(&info->shrinklist, in shmem_get_folio_gfp()
2074 &sbinfo->shrinklist); in shmem_get_folio_gfp()
2075 sbinfo->shrinklist_len++; in shmem_get_folio_gfp()
2077 spin_unlock(&sbinfo->shrinklist_lock); in shmem_get_folio_gfp()
2087 * Let SGP_WRITE caller clear ends if write does not fill folio; in shmem_get_folio_gfp()
2108 error = -EINVAL; in shmem_get_folio_gfp()
2131 if (error == -ENOSPC && !once++) { in shmem_get_folio_gfp()
2135 if (error == -EEXIST) in shmem_get_folio_gfp()
2144 mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); in shmem_get_folio()
2149 * entry unconditionally - even if something else had already woken the
2155 list_del_init(&wait->entry); in synchronous_wake_function()
2161 struct vm_area_struct *vma = vmf->vma; in shmem_fault()
2162 struct inode *inode = file_inode(vma->vm_file); in shmem_fault()
2163 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); in shmem_fault()
2170 * prevent the hole-punch from ever completing: which in turn in shmem_fault()
2177 * It does not matter if we sometimes reach this check just before the in shmem_fault()
2178 * hole-punch begins, so that one fault then races with the punch: in shmem_fault()
2185 if (unlikely(inode->i_private)) { in shmem_fault()
2188 spin_lock(&inode->i_lock); in shmem_fault()
2189 shmem_falloc = inode->i_private; in shmem_fault()
2191 shmem_falloc->waitq && in shmem_fault()
2192 vmf->pgoff >= shmem_falloc->start && in shmem_fault()
2193 vmf->pgoff < shmem_falloc->next) { in shmem_fault()
2203 shmem_falloc_waitq = shmem_falloc->waitq; in shmem_fault()
2206 spin_unlock(&inode->i_lock); in shmem_fault()
2211 * stack of the hole-punching task: shmem_falloc_waitq in shmem_fault()
2213 * finish_wait() does not dereference it in that case; in shmem_fault()
2216 spin_lock(&inode->i_lock); in shmem_fault()
2218 spin_unlock(&inode->i_lock); in shmem_fault()
2224 spin_unlock(&inode->i_lock); in shmem_fault()
2227 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, in shmem_fault()
2232 vmf->page = folio_file_page(folio, vmf->pgoff); in shmem_fault()
2249 return -ENOMEM; in shmem_get_unmapped_area()
2251 get_area = current->mm->get_unmapped_area; in shmem_get_unmapped_area()
2260 if (addr > TASK_SIZE - len) in shmem_get_unmapped_area()
2282 VM_BUG_ON(file->f_op != &shmem_file_operations); in shmem_get_unmapped_area()
2283 sb = file_inode(file)->i_sb; in shmem_get_unmapped_area()
2291 sb = shm_mnt->mnt_sb; in shmem_get_unmapped_area()
2293 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) in shmem_get_unmapped_area()
2297 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); in shmem_get_unmapped_area()
2300 if ((addr & (HPAGE_PMD_SIZE-1)) == offset) in shmem_get_unmapped_area()
2303 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; in shmem_get_unmapped_area()
2315 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); in shmem_get_unmapped_area()
2316 inflated_addr += offset - inflated_offset; in shmem_get_unmapped_area()
2320 if (inflated_addr > TASK_SIZE - len) in shmem_get_unmapped_area()
2328 struct inode *inode = file_inode(vma->vm_file); in shmem_set_policy()
2329 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); in shmem_set_policy()
2335 struct inode *inode = file_inode(vma->vm_file); in shmem_get_policy()
2338 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; in shmem_get_policy()
2339 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); in shmem_get_policy()
2347 int retval = -ENOMEM; in shmem_lock()
2350 * What serializes the accesses to info->flags? in shmem_lock()
2354 if (lock && !(info->flags & VM_LOCKED)) { in shmem_lock()
2355 if (!user_shm_lock(inode->i_size, ucounts)) in shmem_lock()
2357 info->flags |= VM_LOCKED; in shmem_lock()
2358 mapping_set_unevictable(file->f_mapping); in shmem_lock()
2360 if (!lock && (info->flags & VM_LOCKED) && ucounts) { in shmem_lock()
2361 user_shm_unlock(inode->i_size, ucounts); in shmem_lock()
2362 info->flags &= ~VM_LOCKED; in shmem_lock()
2363 mapping_clear_unevictable(file->f_mapping); in shmem_lock()
2377 ret = seal_check_future_write(info->seals, vma); in shmem_mmap()
2381 /* arm64 - allow memory tagging on RAM-based files */ in shmem_mmap()
2386 if (inode->i_nlink) in shmem_mmap()
2387 vma->vm_ops = &shmem_vm_ops; in shmem_mmap()
2389 vma->vm_ops = &shmem_anon_vm_ops; in shmem_mmap()
2395 file->f_mode |= FMODE_CAN_ODIRECT; in shmem_file_open()
2417 * But FS_NODUMP_FL does not require any action in i_flags. in shmem_set_inode_flags()
2430 return &SHMEM_I(inode)->dir_offsets; in shmem_get_offset_ctx()
2452 return ERR_PTR(-ENOSPC); in __shmem_get_inode()
2455 inode->i_ino = ino; in __shmem_get_inode()
2457 inode->i_blocks = 0; in __shmem_get_inode()
2458 inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); in __shmem_get_inode()
2459 inode->i_generation = get_random_u32(); in __shmem_get_inode()
2461 memset(info, 0, (char *)inode - (char *)info); in __shmem_get_inode()
2462 spin_lock_init(&info->lock); in __shmem_get_inode()
2463 atomic_set(&info->stop_eviction, 0); in __shmem_get_inode()
2464 info->seals = F_SEAL_SEAL; in __shmem_get_inode()
2465 info->flags = flags & VM_NORESERVE; in __shmem_get_inode()
2466 info->i_crtime = inode->i_mtime; in __shmem_get_inode()
2467 info->fsflags = (dir == NULL) ? 0 : in __shmem_get_inode()
2468 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED; in __shmem_get_inode()
2469 if (info->fsflags) in __shmem_get_inode()
2470 shmem_set_inode_flags(inode, info->fsflags); in __shmem_get_inode()
2471 INIT_LIST_HEAD(&info->shrinklist); in __shmem_get_inode()
2472 INIT_LIST_HEAD(&info->swaplist); in __shmem_get_inode()
2474 if (sbinfo->noswap) in __shmem_get_inode()
2475 mapping_set_unevictable(inode->i_mapping); in __shmem_get_inode()
2476 simple_xattrs_init(&info->xattrs); in __shmem_get_inode()
2478 mapping_set_large_folios(inode->i_mapping); in __shmem_get_inode()
2482 inode->i_op = &shmem_special_inode_operations; in __shmem_get_inode()
2486 inode->i_mapping->a_ops = &shmem_aops; in __shmem_get_inode()
2487 inode->i_op = &shmem_inode_operations; in __shmem_get_inode()
2488 inode->i_fop = &shmem_file_operations; in __shmem_get_inode()
2489 mpol_shared_policy_init(&info->policy, in __shmem_get_inode()
2495 inode->i_size = 2 * BOGO_DIRENT_SIZE; in __shmem_get_inode()
2496 inode->i_op = &shmem_dir_inode_operations; in __shmem_get_inode()
2497 inode->i_fop = &simple_offset_dir_operations; in __shmem_get_inode()
2502 * Must not load anything in the rbtree, in __shmem_get_inode()
2503 * mpol_free_shared_policy will not be called. in __shmem_get_inode()
2505 mpol_shared_policy_init(&info->policy, NULL); in __shmem_get_inode()
2537 inode->i_flags |= S_NOQUOTA; in shmem_get_inode()
2558 struct inode *inode = file_inode(dst_vma->vm_file); in shmem_mfill_atomic_pte()
2560 struct address_space *mapping = inode->i_mapping; in shmem_mfill_atomic_pte()
2570 * We may have got a page, returned -ENOENT triggering a retry, in shmem_mfill_atomic_pte()
2571 * and now we find ourselves with -ENOMEM. Release the page, to in shmem_mfill_atomic_pte()
2578 return -ENOMEM; in shmem_mfill_atomic_pte()
2582 ret = -ENOMEM; in shmem_mfill_atomic_pte()
2614 ret = -ENOENT; in shmem_mfill_atomic_pte()
2621 clear_user_highpage(&folio->page, dst_addr); in shmem_mfill_atomic_pte()
2635 ret = -EFAULT; in shmem_mfill_atomic_pte()
2641 gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm); in shmem_mfill_atomic_pte()
2646 &folio->page, true, flags); in shmem_mfill_atomic_pte()
2673 struct inode *inode = mapping->host; in shmem_write_begin()
2680 if (unlikely(info->seals & (F_SEAL_GROW | in shmem_write_begin()
2682 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) in shmem_write_begin()
2683 return -EPERM; in shmem_write_begin()
2684 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) in shmem_write_begin()
2685 return -EPERM; in shmem_write_begin()
2698 return -EIO; in shmem_write_begin()
2710 struct inode *inode = mapping->host; in shmem_write_end()
2712 if (pos + copied > inode->i_size) in shmem_write_end()
2732 struct file *file = iocb->ki_filp; in shmem_file_read_iter()
2734 struct address_space *mapping = inode->i_mapping; in shmem_file_read_iter()
2739 loff_t *ppos = &iocb->ki_pos; in shmem_file_read_iter()
2762 if (error == -EINVAL) in shmem_file_read_iter()
2772 error = -EIO; in shmem_file_read_iter()
2792 nr -= offset; in shmem_file_read_iter()
2808 * Ok, we have the page, and it's up-to-date, so in shmem_file_read_iter()
2817 * clear_user() not so much, that it is noticeably in shmem_file_read_iter()
2824 * splice() - or others? - can result in confusion: in shmem_file_read_iter()
2838 error = -EFAULT; in shmem_file_read_iter()
2851 struct file *file = iocb->ki_filp; in shmem_file_write_iter()
2852 struct inode *inode = file->f_mapping->host; in shmem_file_write_iter()
2899 size = min_t(size_t, size, PAGE_SIZE - offset); in splice_zeropage_into_pipe()
2901 if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { in splice_zeropage_into_pipe()
2910 pipe->head++; in splice_zeropage_into_pipe()
2921 struct address_space *mapping = inode->i_mapping; in shmem_file_splice_read()
2928 used = pipe_occupancy(pipe->head, pipe->tail); in shmem_file_splice_read()
2929 npages = max_t(ssize_t, pipe->max_usage - used, 0); in shmem_file_splice_read()
2939 if (error == -EINVAL) in shmem_file_splice_read()
2949 error = -EIO; in shmem_file_splice_read()
2958 * the correct value for "nr", which means the zero-filled in shmem_file_splice_read()
2959 * part of the page is not copied back to userspace (unless in shmem_file_splice_read()
2960 * another truncate extends the file - this is desired though). in shmem_file_splice_read()
2965 part = min_t(loff_t, isize - *ppos, len); in shmem_file_splice_read()
2977 * Ok, we have the page, and it's up-to-date, so we can in shmem_file_splice_read()
2989 len -= n; in shmem_file_splice_read()
2992 in->f_ra.prev_pos = *ppos; in shmem_file_splice_read()
2993 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) in shmem_file_splice_read()
3008 struct address_space *mapping = file->f_mapping; in shmem_file_llseek()
3009 struct inode *inode = mapping->host; in shmem_file_llseek()
3015 return -ENXIO; in shmem_file_llseek()
3019 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); in shmem_file_llseek()
3030 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_fallocate()
3037 return -EOPNOTSUPP; in shmem_fallocate()
3042 struct address_space *mapping = file->f_mapping; in shmem_fallocate()
3044 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; in shmem_fallocate()
3048 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { in shmem_fallocate()
3049 error = -EPERM; in shmem_fallocate()
3056 spin_lock(&inode->i_lock); in shmem_fallocate()
3057 inode->i_private = &shmem_falloc; in shmem_fallocate()
3058 spin_unlock(&inode->i_lock); in shmem_fallocate()
3062 1 + unmap_end - unmap_start, 0); in shmem_fallocate()
3063 shmem_truncate_range(inode, offset, offset + len - 1); in shmem_fallocate()
3064 /* No need to unmap again: hole-punching leaves COWed pages */ in shmem_fallocate()
3066 spin_lock(&inode->i_lock); in shmem_fallocate()
3067 inode->i_private = NULL; in shmem_fallocate()
3070 spin_unlock(&inode->i_lock); in shmem_fallocate()
3080 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { in shmem_fallocate()
3081 error = -EPERM; in shmem_fallocate()
3086 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; in shmem_fallocate()
3088 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { in shmem_fallocate()
3089 error = -ENOSPC; in shmem_fallocate()
3098 spin_lock(&inode->i_lock); in shmem_fallocate()
3099 inode->i_private = &shmem_falloc; in shmem_fallocate()
3100 spin_unlock(&inode->i_lock); in shmem_fallocate()
3103 * info->fallocend is only relevant when huge pages might be in shmem_fallocate()
3107 undo_fallocend = info->fallocend; in shmem_fallocate()
3108 if (info->fallocend < end) in shmem_fallocate()
3109 info->fallocend = end; in shmem_fallocate()
3119 error = -EINTR; in shmem_fallocate()
3121 error = -ENOMEM; in shmem_fallocate()
3126 info->fallocend = undo_fallocend; in shmem_fallocate()
3131 ((loff_t)index << PAGE_SHIFT) - 1, true); in shmem_fallocate()
3139 * making it uptodate and un-undoable if we fail later. in shmem_fallocate()
3142 /* Beware 32-bit wraparound */ in shmem_fallocate()
3144 index--; in shmem_fallocate()
3151 shmem_falloc.nr_falloced += index - shmem_falloc.next; in shmem_fallocate()
3167 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) in shmem_fallocate()
3170 spin_lock(&inode->i_lock); in shmem_fallocate()
3171 inode->i_private = NULL; in shmem_fallocate()
3172 spin_unlock(&inode->i_lock); in shmem_fallocate()
3182 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); in shmem_statfs()
3184 buf->f_type = TMPFS_MAGIC; in shmem_statfs()
3185 buf->f_bsize = PAGE_SIZE; in shmem_statfs()
3186 buf->f_namelen = NAME_MAX; in shmem_statfs()
3187 if (sbinfo->max_blocks) { in shmem_statfs()
3188 buf->f_blocks = sbinfo->max_blocks; in shmem_statfs()
3189 buf->f_bavail = in shmem_statfs()
3190 buf->f_bfree = sbinfo->max_blocks - in shmem_statfs()
3191 percpu_counter_sum(&sbinfo->used_blocks); in shmem_statfs()
3193 if (sbinfo->max_inodes) { in shmem_statfs()
3194 buf->f_files = sbinfo->max_inodes; in shmem_statfs()
3195 buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE; in shmem_statfs()
3199 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); in shmem_statfs()
3214 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE); in shmem_mknod()
3222 &dentry->d_name, in shmem_mknod()
3224 if (error && error != -EOPNOTSUPP) in shmem_mknod()
3231 dir->i_size += BOGO_DIRENT_SIZE; in shmem_mknod()
3232 dir->i_mtime = inode_set_ctime_current(dir); in shmem_mknod()
3235 dget(dentry); /* Extra count - pin the dentry in core */ in shmem_mknod()
3250 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE); in shmem_tmpfile()
3260 if (error && error != -EOPNOTSUPP) in shmem_tmpfile()
3307 if (inode->i_nlink) { in shmem_link()
3308 ret = shmem_reserve_inode(inode->i_sb, NULL); in shmem_link()
3315 if (inode->i_nlink) in shmem_link()
3316 shmem_free_inode(inode->i_sb, 0); in shmem_link()
3320 dir->i_size += BOGO_DIRENT_SIZE; in shmem_link()
3321 dir->i_mtime = inode_set_ctime_to_ts(dir, in shmem_link()
3336 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) in shmem_unlink()
3337 shmem_free_inode(inode->i_sb, 0); in shmem_unlink()
3341 dir->i_size -= BOGO_DIRENT_SIZE; in shmem_unlink()
3342 dir->i_mtime = inode_set_ctime_to_ts(dir, in shmem_unlink()
3346 dput(dentry); /* Undo the count from "create" - this does all the work */ in shmem_unlink()
3353 return -ENOTEMPTY; in shmem_rmdir()
3366 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); in shmem_whiteout()
3368 return -ENOMEM; in shmem_whiteout()
3381 * not sure which one, but that isn't even important. in shmem_whiteout()
3399 int they_are_dirs = S_ISDIR(inode->i_mode); in shmem_rename2()
3403 return -EINVAL; in shmem_rename2()
3410 return -ENOTEMPTY; in shmem_rename2()
3434 old_dir->i_size -= BOGO_DIRENT_SIZE; in shmem_rename2()
3435 new_dir->i_size += BOGO_DIRENT_SIZE; in shmem_rename2()
3452 return -ENAMETOOLONG; in shmem_symlink()
3454 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0, in shmem_symlink()
3460 error = security_inode_init_security(inode, dir, &dentry->d_name, in shmem_symlink()
3462 if (error && error != -EOPNOTSUPP) in shmem_symlink()
3469 inode->i_size = len-1; in shmem_symlink()
3471 inode->i_link = kmemdup(symname, len, GFP_KERNEL); in shmem_symlink()
3472 if (!inode->i_link) { in shmem_symlink()
3473 error = -ENOMEM; in shmem_symlink()
3476 inode->i_op = &shmem_short_symlink_operations; in shmem_symlink()
3482 inode->i_mapping->a_ops = &shmem_aops; in shmem_symlink()
3483 inode->i_op = &shmem_symlink_inode_operations; in shmem_symlink()
3490 dir->i_size += BOGO_DIRENT_SIZE; in shmem_symlink()
3491 dir->i_mtime = inode_set_ctime_current(dir); in shmem_symlink()
3518 folio = filemap_get_folio(inode->i_mapping, 0); in shmem_get_link()
3520 return ERR_PTR(-ECHILD); in shmem_get_link()
3524 return ERR_PTR(-ECHILD); in shmem_get_link()
3531 return ERR_PTR(-ECHILD); in shmem_get_link()
3535 return ERR_PTR(-ECHILD); in shmem_get_link()
3549 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE); in shmem_fileattr_get()
3561 return -EOPNOTSUPP; in shmem_fileattr_set()
3562 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE) in shmem_fileattr_set()
3563 return -EOPNOTSUPP; in shmem_fileattr_set()
3565 info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) | in shmem_fileattr_set()
3566 (fa->flags & SHMEM_FL_USER_MODIFIABLE); in shmem_fileattr_set()
3568 shmem_set_inode_flags(inode, info->fsflags); in shmem_fileattr_set()
3589 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_initxattrs()
3595 if (sbinfo->max_inodes) { in shmem_initxattrs()
3596 for (xattr = xattr_array; xattr->name != NULL; xattr++) { in shmem_initxattrs()
3597 ispace += simple_xattr_space(xattr->name, in shmem_initxattrs()
3598 xattr->value_len + XATTR_SECURITY_PREFIX_LEN); in shmem_initxattrs()
3601 raw_spin_lock(&sbinfo->stat_lock); in shmem_initxattrs()
3602 if (sbinfo->free_ispace < ispace) in shmem_initxattrs()
3605 sbinfo->free_ispace -= ispace; in shmem_initxattrs()
3606 raw_spin_unlock(&sbinfo->stat_lock); in shmem_initxattrs()
3608 return -ENOSPC; in shmem_initxattrs()
3612 for (xattr = xattr_array; xattr->name != NULL; xattr++) { in shmem_initxattrs()
3613 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); in shmem_initxattrs()
3617 len = strlen(xattr->name) + 1; in shmem_initxattrs()
3618 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, in shmem_initxattrs()
3620 if (!new_xattr->name) { in shmem_initxattrs()
3625 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, in shmem_initxattrs()
3627 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, in shmem_initxattrs()
3628 xattr->name, len); in shmem_initxattrs()
3630 simple_xattr_add(&info->xattrs, new_xattr); in shmem_initxattrs()
3633 if (xattr->name != NULL) { in shmem_initxattrs()
3635 raw_spin_lock(&sbinfo->stat_lock); in shmem_initxattrs()
3636 sbinfo->free_ispace += ispace; in shmem_initxattrs()
3637 raw_spin_unlock(&sbinfo->stat_lock); in shmem_initxattrs()
3639 simple_xattrs_free(&info->xattrs, NULL); in shmem_initxattrs()
3640 return -ENOMEM; in shmem_initxattrs()
3653 return simple_xattr_get(&info->xattrs, name, buffer, size); in shmem_xattr_handler_get()
3663 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); in shmem_xattr_handler_set()
3668 if (value && sbinfo->max_inodes) { in shmem_xattr_handler_set()
3670 raw_spin_lock(&sbinfo->stat_lock); in shmem_xattr_handler_set()
3671 if (sbinfo->free_ispace < ispace) in shmem_xattr_handler_set()
3674 sbinfo->free_ispace -= ispace; in shmem_xattr_handler_set()
3675 raw_spin_unlock(&sbinfo->stat_lock); in shmem_xattr_handler_set()
3677 return -ENOSPC; in shmem_xattr_handler_set()
3680 old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags); in shmem_xattr_handler_set()
3683 if (old_xattr && sbinfo->max_inodes) in shmem_xattr_handler_set()
3684 ispace = simple_xattr_space(old_xattr->name, in shmem_xattr_handler_set()
3685 old_xattr->size); in shmem_xattr_handler_set()
3692 raw_spin_lock(&sbinfo->stat_lock); in shmem_xattr_handler_set()
3693 sbinfo->free_ispace += ispace; in shmem_xattr_handler_set()
3694 raw_spin_unlock(&sbinfo->stat_lock); in shmem_xattr_handler_set()
3727 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); in shmem_listxattr()
3751 return ERR_PTR(-ESTALE); in shmem_get_parent()
3759 return ino->i_ino == inum && fh[0] == ino->i_generation; in shmem_match()
3781 inum = fid->raw[2]; in shmem_fh_to_dentry()
3782 inum = (inum << 32) | fid->raw[1]; in shmem_fh_to_dentry()
3784 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), in shmem_fh_to_dentry()
3785 shmem_match, fid->raw); in shmem_fh_to_dentry()
3803 /* Unfortunately insert_inode_hash is not idempotent, in shmem_encode_fh()
3812 inode->i_ino + inode->i_generation); in shmem_encode_fh()
3816 fh[0] = inode->i_generation; in shmem_encode_fh()
3817 fh[1] = inode->i_ino; in shmem_encode_fh()
3818 fh[2] = ((__u64)inode->i_ino) >> 32; in shmem_encode_fh()
3885 struct shmem_options *ctx = fc->fs_private; in shmem_parse_one()
3899 size = memparse(param->string, &rest); in shmem_parse_one()
3908 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); in shmem_parse_one()
3909 ctx->seen |= SHMEM_SEEN_BLOCKS; in shmem_parse_one()
3912 ctx->blocks = memparse(param->string, &rest); in shmem_parse_one()
3913 if (*rest || ctx->blocks > LONG_MAX) in shmem_parse_one()
3915 ctx->seen |= SHMEM_SEEN_BLOCKS; in shmem_parse_one()
3918 ctx->inodes = memparse(param->string, &rest); in shmem_parse_one()
3919 if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE) in shmem_parse_one()
3921 ctx->seen |= SHMEM_SEEN_INODES; in shmem_parse_one()
3924 ctx->mode = result.uint_32 & 07777; in shmem_parse_one()
3935 if (!kuid_has_mapping(fc->user_ns, kuid)) in shmem_parse_one()
3938 ctx->uid = kuid; in shmem_parse_one()
3949 if (!kgid_has_mapping(fc->user_ns, kgid)) in shmem_parse_one()
3952 ctx->gid = kgid; in shmem_parse_one()
3955 ctx->huge = result.uint_32; in shmem_parse_one()
3956 if (ctx->huge != SHMEM_HUGE_NEVER && in shmem_parse_one()
3960 ctx->seen |= SHMEM_SEEN_HUGE; in shmem_parse_one()
3964 mpol_put(ctx->mpol); in shmem_parse_one()
3965 ctx->mpol = NULL; in shmem_parse_one()
3966 if (mpol_parse_str(param->string, &ctx->mpol)) in shmem_parse_one()
3972 ctx->full_inums = false; in shmem_parse_one()
3973 ctx->seen |= SHMEM_SEEN_INUMS; in shmem_parse_one()
3980 ctx->full_inums = true; in shmem_parse_one()
3981 ctx->seen |= SHMEM_SEEN_INUMS; in shmem_parse_one()
3984 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) { in shmem_parse_one()
3988 ctx->noswap = true; in shmem_parse_one()
3989 ctx->seen |= SHMEM_SEEN_NOSWAP; in shmem_parse_one()
3992 if (fc->user_ns != &init_user_ns) in shmem_parse_one()
3994 ctx->seen |= SHMEM_SEEN_QUOTA; in shmem_parse_one()
3995 ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP); in shmem_parse_one()
3998 if (fc->user_ns != &init_user_ns) in shmem_parse_one()
4000 ctx->seen |= SHMEM_SEEN_QUOTA; in shmem_parse_one()
4001 ctx->quota_types |= QTYPE_MASK_USR; in shmem_parse_one()
4004 if (fc->user_ns != &init_user_ns) in shmem_parse_one()
4006 ctx->seen |= SHMEM_SEEN_QUOTA; in shmem_parse_one()
4007 ctx->quota_types |= QTYPE_MASK_GRP; in shmem_parse_one()
4010 size = memparse(param->string, &rest); in shmem_parse_one()
4016 ctx->qlimits.usrquota_bhardlimit = size; in shmem_parse_one()
4019 size = memparse(param->string, &rest); in shmem_parse_one()
4025 ctx->qlimits.grpquota_bhardlimit = size; in shmem_parse_one()
4028 size = memparse(param->string, &rest); in shmem_parse_one()
4034 ctx->qlimits.usrquota_ihardlimit = size; in shmem_parse_one()
4037 size = memparse(param->string, &rest); in shmem_parse_one()
4043 ctx->qlimits.grpquota_ihardlimit = size; in shmem_parse_one()
4049 return invalfc(fc, "Unsupported parameter '%s'", param->key); in shmem_parse_one()
4051 return invalfc(fc, "Bad value for '%s'", param->key); in shmem_parse_one()
4059 int err = security_sb_eat_lsm_opts(options, &fc->security); in shmem_parse_options()
4068 * NUL-terminate this option: unfortunately, in shmem_parse_options()
4069 * mount options form a comma-separated list, in shmem_parse_options()
4077 options[-1] = '\0'; in shmem_parse_options()
4103 struct shmem_options *ctx = fc->fs_private; in shmem_reconfigure()
4104 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); in shmem_reconfigure()
4109 raw_spin_lock(&sbinfo->stat_lock); in shmem_reconfigure()
4110 used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace; in shmem_reconfigure()
4112 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { in shmem_reconfigure()
4113 if (!sbinfo->max_blocks) { in shmem_reconfigure()
4117 if (percpu_counter_compare(&sbinfo->used_blocks, in shmem_reconfigure()
4118 ctx->blocks) > 0) { in shmem_reconfigure()
4123 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { in shmem_reconfigure()
4124 if (!sbinfo->max_inodes) { in shmem_reconfigure()
4128 if (ctx->inodes * BOGO_INODE_SIZE < used_isp) { in shmem_reconfigure()
4134 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && in shmem_reconfigure()
4135 sbinfo->next_ino > UINT_MAX) { in shmem_reconfigure()
4136 err = "Current inum too high to switch to 32-bit inums"; in shmem_reconfigure()
4139 if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) { in shmem_reconfigure()
4143 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) { in shmem_reconfigure()
4148 if (ctx->seen & SHMEM_SEEN_QUOTA && in shmem_reconfigure()
4149 !sb_any_quota_loaded(fc->root->d_sb)) { in shmem_reconfigure()
4156 (ctx->qlimits.name## hardlimit && \ in shmem_reconfigure()
4157 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit)) in shmem_reconfigure()
4166 if (ctx->seen & SHMEM_SEEN_HUGE) in shmem_reconfigure()
4167 sbinfo->huge = ctx->huge; in shmem_reconfigure()
4168 if (ctx->seen & SHMEM_SEEN_INUMS) in shmem_reconfigure()
4169 sbinfo->full_inums = ctx->full_inums; in shmem_reconfigure()
4170 if (ctx->seen & SHMEM_SEEN_BLOCKS) in shmem_reconfigure()
4171 sbinfo->max_blocks = ctx->blocks; in shmem_reconfigure()
4172 if (ctx->seen & SHMEM_SEEN_INODES) { in shmem_reconfigure()
4173 sbinfo->max_inodes = ctx->inodes; in shmem_reconfigure()
4174 sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp; in shmem_reconfigure()
4180 if (ctx->mpol) { in shmem_reconfigure()
4181 mpol = sbinfo->mpol; in shmem_reconfigure()
4182 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ in shmem_reconfigure()
4183 ctx->mpol = NULL; in shmem_reconfigure()
4186 if (ctx->noswap) in shmem_reconfigure()
4187 sbinfo->noswap = true; in shmem_reconfigure()
4189 raw_spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
4193 raw_spin_unlock(&sbinfo->stat_lock); in shmem_reconfigure()
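The shmem_reconfigure() lines above do the nr_inodes remount bookkeeping in BOGO_INODE_SIZE units: space already used is max_inodes * BOGO_INODE_SIZE - free_ispace, the new limit is rejected if it would not cover that usage, and otherwise free_ispace becomes the new total minus what is used. A short illustration of that arithmetic (values and the 1024-byte unit are stand-ins chosen for the example):

#include <stdio.h>

/* Nominal per-inode charge, standing in for the kernel's BOGO_INODE_SIZE. */
#define TOY_BOGO_INODE_SIZE 1024UL

int main(void)
{
	unsigned long max_inodes = 100, free_ispace = 60 * TOY_BOGO_INODE_SIZE;
	unsigned long new_inodes = 50;

	/* Space already consumed by existing inodes (and their xattrs). */
	unsigned long used_isp = max_inodes * TOY_BOGO_INODE_SIZE - free_ispace;

	if (new_inodes * TOY_BOGO_INODE_SIZE < used_isp) {
		printf("remount rejected: too few inodes for current use\n");
		return 1;
	}
	/* Accept the new limit and recompute what remains free. */
	max_inodes = new_inodes;
	free_ispace = new_inodes * TOY_BOGO_INODE_SIZE - used_isp;
	printf("used=%lu free=%lu (limit %lu inodes)\n",
	       used_isp / TOY_BOGO_INODE_SIZE,
	       free_ispace / TOY_BOGO_INODE_SIZE, max_inodes);
	return 0;
}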
4199 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); in shmem_show_options()
4202 if (sbinfo->max_blocks != shmem_default_max_blocks()) in shmem_show_options()
4203 seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks)); in shmem_show_options()
4204 if (sbinfo->max_inodes != shmem_default_max_inodes()) in shmem_show_options()
4205 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); in shmem_show_options()
4206 if (sbinfo->mode != (0777 | S_ISVTX)) in shmem_show_options()
4207 seq_printf(seq, ",mode=%03ho", sbinfo->mode); in shmem_show_options()
4208 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) in shmem_show_options()
4210 from_kuid_munged(&init_user_ns, sbinfo->uid)); in shmem_show_options()
4211 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) in shmem_show_options()
4213 from_kgid_munged(&init_user_ns, sbinfo->gid)); in shmem_show_options()
4218 * /proc/config.gz to confirm 64-bit inums were successfully applied in shmem_show_options()
4219 * (which may not even exist if IKCONFIG_PROC isn't enabled). in shmem_show_options()
4221 * We hide it when inode64 isn't the default and we are using 32-bit in shmem_show_options()
4227 * +-----------------+-----------------+ in shmem_show_options()
4229 * +------------------+-----------------+-----------------+ in shmem_show_options()
4232 * +------------------+-----------------+-----------------+ in shmem_show_options()
4235 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) in shmem_show_options()
4236 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32)); in shmem_show_options()
4239 if (sbinfo->huge) in shmem_show_options()
4240 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); in shmem_show_options()
4245 if (sbinfo->noswap) in shmem_show_options()
4259 free_percpu(sbinfo->ino_batch); in shmem_put_super()
4260 percpu_counter_destroy(&sbinfo->used_blocks); in shmem_put_super()
4261 mpol_put(sbinfo->mpol); in shmem_put_super()
4263 sb->s_fs_info = NULL; in shmem_put_super()
4268 struct shmem_options *ctx = fc->fs_private; in shmem_fill_super()
4271 int error = -ENOMEM; in shmem_fill_super()
4279 sb->s_fs_info = sbinfo; in shmem_fill_super()
4287 if (!(sb->s_flags & SB_KERNMOUNT)) { in shmem_fill_super()
4288 if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) in shmem_fill_super()
4289 ctx->blocks = shmem_default_max_blocks(); in shmem_fill_super()
4290 if (!(ctx->seen & SHMEM_SEEN_INODES)) in shmem_fill_super()
4291 ctx->inodes = shmem_default_max_inodes(); in shmem_fill_super()
4292 if (!(ctx->seen & SHMEM_SEEN_INUMS)) in shmem_fill_super()
4293 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); in shmem_fill_super()
4294 sbinfo->noswap = ctx->noswap; in shmem_fill_super()
4296 sb->s_flags |= SB_NOUSER; in shmem_fill_super()
4298 sb->s_export_op = &shmem_export_ops; in shmem_fill_super()
4299 sb->s_flags |= SB_NOSEC | SB_I_VERSION; in shmem_fill_super()
4301 sb->s_flags |= SB_NOUSER; in shmem_fill_super()
4303 sbinfo->max_blocks = ctx->blocks; in shmem_fill_super()
4304 sbinfo->max_inodes = ctx->inodes; in shmem_fill_super()
4305 sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE; in shmem_fill_super()
4306 if (sb->s_flags & SB_KERNMOUNT) { in shmem_fill_super()
4307 sbinfo->ino_batch = alloc_percpu(ino_t); in shmem_fill_super()
4308 if (!sbinfo->ino_batch) in shmem_fill_super()
4311 sbinfo->uid = ctx->uid; in shmem_fill_super()
4312 sbinfo->gid = ctx->gid; in shmem_fill_super()
4313 sbinfo->full_inums = ctx->full_inums; in shmem_fill_super()
4314 sbinfo->mode = ctx->mode; in shmem_fill_super()
4315 sbinfo->huge = ctx->huge; in shmem_fill_super()
4316 sbinfo->mpol = ctx->mpol; in shmem_fill_super()
4317 ctx->mpol = NULL; in shmem_fill_super()
4319 raw_spin_lock_init(&sbinfo->stat_lock); in shmem_fill_super()
4320 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) in shmem_fill_super()
4322 spin_lock_init(&sbinfo->shrinklist_lock); in shmem_fill_super()
4323 INIT_LIST_HEAD(&sbinfo->shrinklist); in shmem_fill_super()
4325 sb->s_maxbytes = MAX_LFS_FILESIZE; in shmem_fill_super()
4326 sb->s_blocksize = PAGE_SIZE; in shmem_fill_super()
4327 sb->s_blocksize_bits = PAGE_SHIFT; in shmem_fill_super()
4328 sb->s_magic = TMPFS_MAGIC; in shmem_fill_super()
4329 sb->s_op = &shmem_ops; in shmem_fill_super()
4330 sb->s_time_gran = 1; in shmem_fill_super()
4332 sb->s_xattr = shmem_xattr_handlers; in shmem_fill_super()
4335 sb->s_flags |= SB_POSIXACL; in shmem_fill_super()
4337 uuid_gen(&sb->s_uuid); in shmem_fill_super()
4340 if (ctx->seen & SHMEM_SEEN_QUOTA) { in shmem_fill_super()
4341 sb->dq_op = &shmem_quota_operations; in shmem_fill_super()
4342 sb->s_qcop = &dquot_quotactl_sysfile_ops; in shmem_fill_super()
4343 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; in shmem_fill_super()
4346 memcpy(&sbinfo->qlimits, &ctx->qlimits, in shmem_fill_super()
4349 if (shmem_enable_quotas(sb, ctx->quota_types)) in shmem_fill_super()
4354 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0, in shmem_fill_super()
4360 inode->i_uid = sbinfo->uid; in shmem_fill_super()
4361 inode->i_gid = sbinfo->gid; in shmem_fill_super()
4362 sb->s_root = d_make_root(inode); in shmem_fill_super()
4363 if (!sb->s_root) in shmem_fill_super()
4379 struct shmem_options *ctx = fc->fs_private; in shmem_free_fc()
4382 mpol_put(ctx->mpol); in shmem_free_fc()
4405 return &info->vfs_inode; in shmem_alloc_inode()
4410 if (S_ISLNK(inode->i_mode)) in shmem_free_in_core_inode()
4411 kfree(inode->i_link); in shmem_free_in_core_inode()
4417 if (S_ISREG(inode->i_mode)) in shmem_destroy_inode()
4418 mpol_free_shared_policy(&SHMEM_I(inode)->policy); in shmem_destroy_inode()
4419 if (S_ISDIR(inode->i_mode)) in shmem_destroy_inode()
4426 inode_init_once(&info->vfs_inode); in shmem_init_inode()
4569 return -ENOMEM; in shmem_init_fs_context()
4571 ctx->mode = 0777 | S_ISVTX; in shmem_init_fs_context()
4572 ctx->uid = current_fsuid(); in shmem_init_fs_context()
4573 ctx->gid = current_fsgid(); in shmem_init_fs_context()
4575 fc->fs_private = ctx; in shmem_init_fs_context()
4576 fc->ops = &shmem_fs_context_ops; in shmem_init_fs_context()
4604 pr_err("Could not register quota format\n"); in shmem_init()
4611 pr_err("Could not register tmpfs\n"); in shmem_init()
4618 pr_err("Could not kern_mount tmpfs\n"); in shmem_init()
4624 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; in shmem_init()
4675 return -EINVAL; in shmem_enabled_store()
4678 if (count && tmp[count - 1] == '\n') in shmem_enabled_store()
4679 tmp[count - 1] = '\0'; in shmem_enabled_store()
4682 if (huge == -EINVAL) in shmem_enabled_store()
4683 return -EINVAL; in shmem_enabled_store()
4686 return -EINVAL; in shmem_enabled_store()
4690 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; in shmem_enabled_store()
4700 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4703 * shmem code (swap-backed and resource-limited) are outweighed by
4743 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); in shmem_get_unmapped_area()
4749 truncate_inode_pages_range(inode->i_mapping, lstart, lend); in shmem_truncate_range()
4763 return inode ? inode : ERR_PTR(-ENOSPC); in shmem_get_inode()
4780 return ERR_PTR(-EINVAL); in __shmem_file_setup()
4783 return ERR_PTR(-ENOMEM); in __shmem_file_setup()
4786 return ERR_PTR(-EINVAL); in __shmem_file_setup()
4788 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL, in __shmem_file_setup()
4795 inode->i_flags |= i_flags; in __shmem_file_setup()
4796 inode->i_size = size; in __shmem_file_setup()
4808 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4815 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4823 * shmem_file_setup - get an unlinked file living in tmpfs
4826 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4835 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4839 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4849 * shmem_zero_setup - setup a shared anonymous mapping
4855 loff_t size = vma->vm_end - vma->vm_start; in shmem_zero_setup()
4863 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags); in shmem_zero_setup()
4867 if (vma->vm_file) in shmem_zero_setup()
4868 fput(vma->vm_file); in shmem_zero_setup()
4869 vma->vm_file = file; in shmem_zero_setup()
4870 vma->vm_ops = &shmem_anon_vm_ops; in shmem_zero_setup()
4876 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
4883 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
4894 struct inode *inode = mapping->host; in shmem_read_folio_gfp()
4922 return &folio->page; in shmem_read_mapping_page_gfp()
4927 return ERR_PTR(-EIO); in shmem_read_mapping_page_gfp()