Lines Matching +full:entry +full:-name (fs/nfs/nfs42xattr.c, NFSv4.2 extended attribute caching)
// SPDX-License-Identifier: GPL-2.0
/*
 * ... to a special-cased entry for the listxattr cache.
 */

/*
 * ... You can certainly add a lot more - but you get what you ask for.
 */
/* nfs4_xattr_hash_init(): set up each hash bucket */
        INIT_HLIST_HEAD(&cache->buckets[i].hlist);
        spin_lock_init(&cache->buckets[i].lock);
        cache->buckets[i].cache = cache;
        cache->buckets[i].draining = false;
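/*
 * A minimal sketch of the bucket layout implied by the loop above;
 * the field names are taken from the accesses, and the real struct
 * may hold more members.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_xattr_bucket {
        spinlock_t lock;                /* protects hlist and draining */
        struct hlist_head hlist;        /* chain of nfs4_xattr_entry */
        struct nfs4_xattr_cache *cache; /* back pointer to owning cache */
        bool draining;                  /* set while the cache is torn down */
};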
/*
 * Wrapper functions to add a cache entry to the right LRU.
 */
static bool nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
{
        struct list_lru *lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
            &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
        return list_lru_add(lru, &entry->lru);
}

static bool nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
{
        struct list_lru *lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
            &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
        return list_lru_del(lru, &entry->lru);
}
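/*
 * A minimal setup sketch for the two LRUs named above (declared here
 * for the sketch): list_lru_add() returns true only if the item was
 * not already on a list, which is why the wrappers can propagate its
 * return value directly. The real file's init may differ, e.g. it may
 * use the memcg-aware variant tied to its shrinkers.
 */
#include <linux/list_lru.h>

static struct list_lru nfs4_xattr_entry_lru;            /* inline values */
static struct list_lru nfs4_xattr_large_entry_lru;      /* EXTVAL values */

static int __init example_lru_setup(void)
{
        int ret = list_lru_init(&nfs4_xattr_entry_lru);

        if (ret == 0)
                ret = list_lru_init(&nfs4_xattr_large_entry_lru);
        return ret;
}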
/*
 * ... extended attribute name/value pairs, but may also be a listxattr
 * cache. Those allocations use the same entry so that they can be
 * ...
 * value fits into one page with the entry structure and the name,
 * ...
 * @name: Name of the extended attribute. NULL for a listxattr cache
 *        entry.
 * ...
 * @len: Length of the value. Can be 0 for zero-length attributes.
 */
static struct nfs4_xattr_entry *
nfs4_xattr_alloc_entry(const char *name, const void *value,
                       struct page **pages, size_t len)
{
        struct nfs4_xattr_entry *entry;
        /* ... */

        if (name != NULL) {
                slen = strlen(name) + 1;
                /* ... */
        }
        /* ... pick a single- or dual-buffer allocation; see the sketch
         * after this function ... */

        entry = (struct nfs4_xattr_entry *)buf;

        if (name != NULL) {
                /* ... */
                memcpy(namep, name, slen);
        }
        /* ... */

        entry->flags = flags;
        entry->xattr_value = valp;
        kref_init(&entry->ref);
        entry->xattr_name = namep;
        entry->xattr_size = len;
        entry->bucket = NULL;
        INIT_LIST_HEAD(&entry->lru);
        INIT_LIST_HEAD(&entry->dispose);
        INIT_HLIST_NODE(&entry->hnode);

        return entry;
}
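/*
 * Sketch of the single-allocation layout described in the comment
 * above: when entry + name + value fit in one page, a single kmalloc()
 * buffer holds all three, so nfs4_xattr_free_entry() releases it with
 * one kfree(). Larger values get a separate buffer and the
 * NFS4_XATTR_ENTRY_EXTVAL flag. Illustrative only; error handling and
 * the pages-copy path are omitted.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct nfs4_xattr_entry *example_layout(const char *name, size_t len)
{
        size_t slen = name ? strlen(name) + 1 : 0;
        size_t inline_size = sizeof(struct nfs4_xattr_entry) + slen + len;
        char *buf;

        if (inline_size <= PAGE_SIZE) {
                buf = kmalloc(inline_size, GFP_KERNEL); /* all-in-one */
        } else {
                buf = kmalloc(sizeof(struct nfs4_xattr_entry) + slen,
                              GFP_KERNEL);              /* entry + name */
                /* the value then goes into a separate kvmalloc(len, ...)
                 * buffer, and the entry carries NFS4_XATTR_ENTRY_EXTVAL */
        }
        if (buf == NULL)
                return NULL;
        /* name sits right after the entry; an inline value after the name */
        return (struct nfs4_xattr_entry *)buf;
}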
static void
nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry)
{
        if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL)
                kvfree(entry->xattr_value); /* value was a separate kvmalloc() */
        kfree(entry);   /* frees the entry plus any inline name/value */
}
/* nfs4_xattr_free_entry_cb(): kref release callback */
        struct nfs4_xattr_entry *entry;

        entry = container_of(kref, struct nfs4_xattr_entry, ref);

        if (WARN_ON(!list_empty(&entry->lru)))
                return;

        nfs4_xattr_free_entry(entry);
/* nfs4_xattr_free_cache_cb(): sanity-check each bucket, then release */
        if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
                return;
        cache->buckets[i].draining = false;
        /* ... */
        cache->listxattr = NULL;
/* nfs4_xattr_alloc_cache() */
        kref_init(&cache->ref);
        atomic_long_set(&cache->nent, 0);
/*
 * Set the listxattr cache, which is a special-cased cache entry.
 * The special value ERR_PTR(-ESTALE) is used to indicate that
 * the cache is being drained - this prevents a new listxattr
 * cache from being added.
 */
/* nfs4_xattr_set_listcache(cache, new) */
        spin_lock(&cache->listxattr_lock);

        old = cache->listxattr;

        if (old == ERR_PTR(-ESTALE)) {
                /* ... draining: refuse the update ... */
        }

        cache->listxattr = new;
        if (new != NULL && new != ERR_PTR(-ESTALE))
                kref_get(&new->ref);

        if (old != NULL && old != ERR_PTR(-ESTALE))
                kref_put(&old->ref, nfs4_xattr_free_entry_cb);

        spin_unlock(&cache->listxattr_lock);
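/*
 * Sketch of consuming the three listxattr states described above:
 * NULL (nothing cached), ERR_PTR(-ESTALE) (cache draining), or a
 * valid entry. Hypothetical helper, not part of the file.
 */
#include <linux/err.h>

static struct nfs4_xattr_entry *
example_get_listcache(struct nfs4_xattr_cache *cache)
{
        struct nfs4_xattr_entry *entry = NULL;

        spin_lock(&cache->listxattr_lock);
        if (cache->listxattr != NULL &&
            cache->listxattr != ERR_PTR(-ESTALE)) {
                entry = cache->listxattr;
                kref_get(&entry->ref);  /* pin it for use after unlock */
        }
        spin_unlock(&cache->listxattr_lock);

        return entry;   /* caller must kref_put() when done */
}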
/* nfs4_xattr_cache_unlink(): detach the cache from its inode */
        oldcache = nfsi->xattr_cache;
        if (oldcache != NULL) {
                list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
                oldcache->inode = NULL;
        }
        nfsi->xattr_cache = NULL;
        nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR;
/* nfs4_xattr_discard_cache() */
        struct nfs4_xattr_entry *entry;

        nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

        for (i = 0; i < ARRAY_SIZE(cache->buckets); i++) {
                bucket = &cache->buckets[i];

                spin_lock(&bucket->lock);
                bucket->draining = true;
                hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
                        nfs4_xattr_entry_lru_del(entry);
                        hlist_del_init(&entry->hnode);
                        kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
                }
                spin_unlock(&bucket->lock);
        }

        atomic_long_set(&cache->nent, 0);

        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
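/*
 * Ordering note for the teardown above: the listxattr slot is poisoned
 * with ERR_PTR(-ESTALE) first, and each bucket is marked draining under
 * its own lock, so concurrent adders (which check bucket->draining and
 * the sentinel under those same locks) cannot re-populate a cache that
 * is being discarded.
 */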
/*
 * nfs4_xattr_get_cache(): ... the attribute cache timeout (for the
 * non-delegated case) is expected to be dealt with in the revalidate
 * routines. ...
 */
        spin_lock(&inode->i_lock);

        if (nfsi->cache_validity & NFS_INO_INVALID_XATTR)
                oldcache = nfs4_xattr_cache_unlink(inode);
        else
                cache = nfsi->xattr_cache;

        if (cache != NULL)
                kref_get(&cache->ref);

        spin_unlock(&inode->i_lock);

        /* ... optimistically allocate a new cache, without holding i_lock ... */

        spin_lock(&inode->i_lock);
        if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) {
                /* invalidated again while unlocked: give up */
                spin_unlock(&inode->i_lock);
                kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
                /* ... */
        }

        /* did someone beat us to it? */
        if (nfsi->xattr_cache != NULL) {
                newcache = nfsi->xattr_cache;
                kref_get(&newcache->ref);
        } else {
                kref_get(&cache->ref);
                nfsi->xattr_cache = cache;
                cache->inode = inode;
                list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
        }

        spin_unlock(&inode->i_lock);

        /* on a lost race: drop our copy, use the winner's */
        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
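/*
 * The shape above is the usual optimistic-allocation pattern: drop the
 * spinlock, allocate, retake the lock, and re-check for a racing
 * installer. A self-contained skeleton with hypothetical names:
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_obj { int dummy; };
static struct example_obj *example_shared;
static DEFINE_SPINLOCK(example_lock);

static struct example_obj *example_get_or_alloc(void)
{
        struct example_obj *obj, *newobj;

        spin_lock(&example_lock);
        obj = example_shared;           /* fast path: already installed */
        spin_unlock(&example_lock);
        if (obj != NULL)
                return obj;

        newobj = kzalloc(sizeof(*newobj), GFP_KERNEL); /* never under lock */
        if (newobj == NULL)
                return NULL;

        spin_lock(&example_lock);
        if (example_shared == NULL) {
                example_shared = newobj;        /* we won the race */
                newobj = NULL;
        }
        obj = example_shared;
        spin_unlock(&example_lock);

        kfree(newobj);  /* lost the race: discard ours (kfree is NULL-safe) */
        return obj;
}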
static struct nfs4_xattr_bucket *
nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
{
        return &cache->buckets[jhash(name, strlen(name), 0) &
            (ARRAY_SIZE(cache->buckets) - 1)];
}
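/*
 * Bucket-selection sketch: jhash() the name bytes and mask with the
 * bucket count, exactly the shape used above. The mask only works
 * because the bucket count is a power of two.
 */
#include <linux/jhash.h>
#include <linux/string.h>

static unsigned int example_bucket_index(const char *name,
                                         unsigned int nbuckets)
{
        /* nbuckets must be a power of two for the mask to be valid */
        return jhash(name, strlen(name), 0) & (nbuckets - 1);
}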
static struct nfs4_xattr_entry *
nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)
{
        struct nfs4_xattr_entry *entry;

        entry = NULL;

        hlist_for_each_entry(entry, &bucket->hlist, hnode) {
                if (!strcmp(entry->xattr_name, name))
                        break;  /* found; entry ends up NULL on a full walk */
        }

        return entry;
}
static bool
nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
                    struct nfs4_xattr_entry *entry)
{
        /* ... */
        bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
        entry->bucket = bucket;

        spin_lock(&bucket->lock);

        if (bucket->draining) {
                /* ... fail: the cache is being torn down ... */
        }

        oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name);
        if (oldentry != NULL) {
                hlist_del_init(&oldentry->hnode);
                nfs4_xattr_entry_lru_del(oldentry);
        } else {
                atomic_long_inc(&cache->nent);
        }

        hlist_add_head(&entry->hnode, &bucket->hlist);
        nfs4_xattr_entry_lru_add(entry);

        spin_unlock(&bucket->lock);

        if (oldentry != NULL)
                kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb);
static void
nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
{
        struct nfs4_xattr_bucket *bucket;
        struct nfs4_xattr_entry *entry;

        bucket = nfs4_xattr_hash_bucket(cache, name);

        spin_lock(&bucket->lock);

        entry = nfs4_xattr_get_entry(bucket, name);
        if (entry != NULL) {
                hlist_del_init(&entry->hnode);
                nfs4_xattr_entry_lru_del(entry);
                atomic_long_dec(&cache->nent);
        }

        spin_unlock(&bucket->lock);

        if (entry != NULL)
                kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
}
static struct nfs4_xattr_entry *
nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
{
        struct nfs4_xattr_bucket *bucket;
        struct nfs4_xattr_entry *entry;

        bucket = nfs4_xattr_hash_bucket(cache, name);

        spin_lock(&bucket->lock);

        entry = nfs4_xattr_get_entry(bucket, name);
        if (entry != NULL)
                kref_get(&entry->ref);  /* reference for the caller */

        spin_unlock(&bucket->lock);

        return entry;
}
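/*
 * Caller-side sketch: the lookup above returns with an extra reference
 * taken under the bucket lock, so the entry stays valid after the lock
 * drops; the caller releases it with kref_put() when done.
 */
        entry = nfs4_xattr_hash_find(cache, name);
        if (entry != NULL) {
                /* ... use entry->xattr_value / entry->xattr_size ... */
                kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
        }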
/*
 * Entry point to retrieve an entry from the cache.
 */
ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf,
                             ssize_t buflen)
{
        struct nfs4_xattr_entry *entry;
        /* ... */
        cache = nfs4_xattr_get_cache(inode, 0);
        if (cache == NULL)
                return -ENOENT;

        entry = nfs4_xattr_hash_find(cache, name);
        if (entry != NULL) {
                dprintk("%s: cache hit '%s', len %lu\n", __func__,
                    entry->xattr_name, (unsigned long)entry->xattr_size);
                if (buflen == 0) {
                        ret = entry->xattr_size;        /* length probe only */
                } else if (buflen < entry->xattr_size) {
                        ret = -ERANGE;
                } else {
                        memcpy(buf, entry->xattr_value, entry->xattr_size);
                        ret = entry->xattr_size;
                }
                kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
        } else {
                dprintk("%s: cache miss '%s'\n", __func__, name);
                ret = -ENOENT;
        }

        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
        return ret;
}
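/*
 * Calling-convention sketch, mirroring getxattr(2): probe with
 * buflen == 0 to learn the size (buf is not touched on that path),
 * then call again with a buffer; a too-small buffer yields -ERANGE.
 */
        ssize_t size = nfs4_xattr_cache_get(inode, name, NULL, 0);

        if (size >= 0) {
                char *val = kmalloc(size, GFP_KERNEL);

                if (val != NULL)
                        size = nfs4_xattr_cache_get(inode, name, val, size);
                /* ... use val, then kfree(val) ... */
        }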
ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen)
{
        struct nfs4_xattr_entry *entry;
        /* ... */
        cache = nfs4_xattr_get_cache(inode, 0);
        if (cache == NULL)
                return -ENOENT;

        spin_lock(&cache->listxattr_lock);

        entry = cache->listxattr;

        if (entry != NULL && entry != ERR_PTR(-ESTALE)) {
                if (buflen == 0) {
                        ret = entry->xattr_size;        /* length probe only */
                } else if (entry->xattr_size > buflen) {
                        ret = -ERANGE;
                } else {
                        memcpy(buf, entry->xattr_value, entry->xattr_size);
                        ret = entry->xattr_size;
                }
        } else {
                ret = -ENOENT;
        }

        spin_unlock(&cache->listxattr_lock);

        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
void nfs4_xattr_cache_add(struct inode *inode, const char *name,
                          const char *buf, struct page **pages, ssize_t buflen)
{
        struct nfs4_xattr_entry *entry;

        dprintk("%s: add '%s' len %lu\n", __func__,
            name, (unsigned long)buflen);
        /* ... */
        entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen);
        if (entry == NULL)
                goto out;
        /* ... invalidate any cached listxattr blob ... */
        if (!nfs4_xattr_hash_add(cache, entry))
                kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
out:
        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
void nfs4_xattr_cache_remove(struct inode *inode, const char *name)
{
        dprintk("%s: remove '%s'\n", __func__, name);
        /* ... */
        nfs4_xattr_hash_remove(cache, name);

        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
/* nfs4_xattr_cache_set_list() */
        struct nfs4_xattr_entry *entry;
        /* ... */
        entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen);
        if (entry == NULL)
                goto out;

        /*
         * This is just there to be able to get to bucket->cache,
         * which is the same for all buckets, so just use bucket 0.
         */
        entry->bucket = &cache->buckets[0];

        if (!nfs4_xattr_set_listcache(cache, entry))
                kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
out:
        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
/* nfs4_xattr_cache_zap() */
        spin_lock(&inode->i_lock);
        oldcache = nfs4_xattr_cache_unlink(inode);
        spin_unlock(&inode->i_lock);
/*
 * The entry LRU is shrunk more aggressively than the cache LRU,
 * ...
 * pruning all but one entry.
 */
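/*
 * A sketch of how "more aggressively" is typically expressed: a
 * shrinker with seeks = 1 (DEFAULT_SEEKS is 2) is asked to scan about
 * twice as many objects per pass. Names here are illustrative; the
 * real file wires its count/scan callbacks to the LRUs in this listing.
 */
#include <linux/list_lru.h>
#include <linux/shrinker.h>

static unsigned long
example_entry_count(struct shrinker *shrink, struct shrink_control *sc)
{
        return list_lru_shrink_count(&nfs4_xattr_entry_lru, sc);
}

static unsigned long
example_entry_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        /* the real scan walks the LRU with an isolate callback; see
         * entry_lru_isolate() and nfs4_xattr_entry_scan() below */
        return SHRINK_STOP;
}

static struct shrinker example_entry_shrinker = {
        .count_objects  = example_entry_count,
        .scan_objects   = example_entry_scan,
        .seeks          = 1,    /* reclaim entries before whole caches */
};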
/* cache_lru_isolate() */
        if (atomic_long_read(&cache->nent) > 1)
                return LRU_SKIP;        /* only take (near-)empty caches */

        inode = cache->inode;
        if (!spin_trylock(&inode->i_lock))
                return LRU_SKIP;        /* inverted lock order: only try */

        kref_get(&cache->ref);

        cache->inode = NULL;
        NFS_I(inode)->xattr_cache = NULL;
        NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR;
        list_lru_isolate(lru, &cache->lru);

        spin_unlock(&inode->i_lock);

        list_add_tail(&cache->dispose, dispose);
        return LRU_REMOVED;
/* nfs4_xattr_cache_scan(): dispose of the isolated caches */
        list_del_init(&cache->dispose);
        nfs4_xattr_discard_cache(cache);
        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
/* entry_lru_isolate() */
        struct nfs4_xattr_entry *entry = container_of(item,
            struct nfs4_xattr_entry, lru);

        bucket = entry->bucket;
        cache = bucket->cache;

        /*
         * Unhook the entry from its parent (either a cache bucket
         * or the listxattr slot). Lock order is inverted here, so
         * trylock and skip the entry if we can't get the lock.
         */
        if (entry->xattr_name != NULL) {
                /* Regular cache entry */
                if (!spin_trylock(&bucket->lock))
                        return LRU_SKIP;

                kref_get(&entry->ref);

                hlist_del_init(&entry->hnode);
                atomic_long_dec(&cache->nent);
                list_lru_isolate(lru, &entry->lru);

                spin_unlock(&bucket->lock);
        } else {
                /* Listxattr cache entry */
                if (!spin_trylock(&cache->listxattr_lock))
                        return LRU_SKIP;

                kref_get(&entry->ref);

                cache->listxattr = NULL;
                list_lru_isolate(lru, &entry->lru);

                spin_unlock(&cache->listxattr_lock);
        }

        list_add_tail(&entry->dispose, dispose);
        return LRU_REMOVED;
/* nfs4_xattr_entry_scan(): free everything the isolate pass collected */
        struct nfs4_xattr_entry *entry;
        /* ... */
        entry = list_first_entry(&dispose, struct nfs4_xattr_entry,
            dispose);
        list_del_init(&entry->dispose);

        /*
         * Drop two references: the one taken in entry_lru_isolate(),
         * and the initial one set when the entry was first allocated.
         */
        kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
        kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
/* nfs4_xattr_cache_init_once(): one-time constructor for slab objects */
        spin_lock_init(&cache->listxattr_lock);
        atomic_long_set(&cache->nent, 0);
        nfs4_xattr_hash_init(cache);
        cache->listxattr = NULL;
        INIT_LIST_HEAD(&cache->lru);
        INIT_LIST_HEAD(&cache->dispose);
/* nfs4_xattr_cache_init() */
        if (nfs4_xattr_cache_cachep == NULL)
                return -ENOMEM;
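/*
 * Sketch of the module-init step implied above: create a slab cache
 * with the init-once constructor, so per-object fields are set up a
 * single time per slab object. The cache name string and the function
 * name are illustrative.
 */
#include <linux/slab.h>

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
        example_cachep = kmem_cache_create("example_xattr_cache",
            sizeof(struct nfs4_xattr_cache), 0, SLAB_RECLAIM_ACCOUNT,
            nfs4_xattr_cache_init_once);
        return example_cachep ? 0 : -ENOMEM;
}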