Lines Matching +full:0 +full:xa (include/linux/xarray.h)

34  * 0-62: Sibling entries
54 WARN_ON((long)v < 0); in xa_mk_value()
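The WARN_ON() above guards xa_mk_value(), which encodes an unsigned long (up to LONG_MAX) as a tagged pointer so small integers can live in the array without any allocation. A minimal sketch of the round trip follows; the array name and index are illustrative, and the includes here also cover the later sketches in this listing:

#include <linux/xarray.h>
#include <linux/printk.h>

static DEFINE_XARRAY(id_map);   /* hypothetical array for this sketch */

static void store_small_int(void)
{
    void *entry;

    xa_store(&id_map, 7, xa_mk_value(42), GFP_KERNEL);

    entry = xa_load(&id_map, 7);
    if (xa_is_value(entry))     /* distinguishes values from pointers */
        pr_info("stored %lu\n", xa_to_value(entry));
}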
85 * @tag: Tag value (0, 1 or 3).
88 * of storing value entries. Three tags are available (0, 1 and 3).
134 * Internal entries are used for a number of purposes. Entries 0-255 are
135 * used for sibling entries (only 0-62 are used by the current code). 256 is used for the retry entry.
211 * the errno from the pointer value, or returns 0 if the pointer does not
215 * Return: A negative errno or 0.
222 return 0; in xa_err()
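A sketch of xa_err() in use: xa_store() hands back either the previous entry or an encoded errno, and xa_err() folds that into a plain int (wrapper name hypothetical):

static int checked_store(struct xarray *xa, unsigned long index, void *p)
{
    /* Previous entry on success, or an internal errno entry on failure. */
    int err = xa_err(xa_store(xa, index, p, GFP_KERNEL));

    return err;     /* 0, or e.g. -ENOMEM */
}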
233 * * xa_limit_32b - [0 - UINT_MAX]
234 * * xa_limit_31b - [0 - INT_MAX]
235 * * xa_limit_16b - [0 - USHRT_MAX]
244 #define xa_limit_32b XA_LIMIT(0, UINT_MAX)
245 #define xa_limit_31b XA_LIMIT(0, INT_MAX)
246 #define xa_limit_16b XA_LIMIT(0, USHRT_MAX)
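Beyond the predefined limits, XA_LIMIT() builds a custom inclusive range. A hedged sketch restricting allocation to IDs 1-255 (array and function names are hypothetical; xa_alloc() itself is documented further down):

static DEFINE_XARRAY_ALLOC(tag_ids);    /* hypothetical ID allocator */

static int get_tag_id(void *tag)
{
    u32 id;
    int err = xa_alloc(&tag_ids, &id, tag, XA_LIMIT(1, 255), GFP_KERNEL);

    return err ? err : id;
}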
249 #define XA_MARK_0 ((__force xa_mark_t)0U)
274 /* ALLOC is for a normal 0-based alloc. ALLOC1 is for a 1-based alloc */
290 * If the only non-NULL entry in the array is at index 0, @xa_head is that
329 #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
332 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
357 void *xa_find(struct xarray *xa, unsigned long *index, unsigned long max, xa_mark_t filter);
359 void *xa_find_after(struct xarray *xa, unsigned long *index, unsigned long max, xa_mark_t filter);
367 * @xa: XArray.
376 static inline void xa_init_flags(struct xarray *xa, gfp_t flags) in xa_init_flags() argument
378 spin_lock_init(&xa->xa_lock); in xa_init_flags()
379 xa->xa_flags = flags; in xa_init_flags()
380 xa->xa_head = NULL; in xa_init_flags()
385 * @xa: XArray.
391 static inline void xa_init(struct xarray *xa) in xa_init() argument
393 xa_init_flags(xa, 0); in xa_init()
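xa_init() is the runtime counterpart of DEFINE_XARRAY() for arrays embedded in dynamically allocated structures; a brief sketch (struct name hypothetical):

struct cache {
    struct xarray pages;        /* hypothetical embedded XArray */
};

static void cache_setup(struct cache *c)
{
    xa_init(&c->pages);         /* equivalent to xa_init_flags(&c->pages, 0) */
}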
398 * @xa: XArray.
403 static inline bool xa_empty(const struct xarray *xa) in xa_empty() argument
405 return xa->xa_head == NULL; in xa_empty()
410 * @xa: Array
416 static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) in xa_marked() argument
418 return xa->xa_flags & XA_FLAGS_MARK(mark); in xa_marked()
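Both predicates read a single word, so they make cheap early-outs. A sketch continuing the hypothetical struct cache above, treating XA_MARK_0 as a "dirty" mark:

static bool cache_has_dirty(struct cache *c)
{
    return !xa_empty(&c->pages) && xa_marked(&c->pages, XA_MARK_0);
}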
423 * @xa: XArray.
430 * in @xa at @index. You may modify @index during the iteration if you want to skip indices.
445 #define xa_for_each_range(xa, index, entry, start, last) \ argument
446 for (index = start, \
447 entry = xa_find(xa, &index, last, XA_PRESENT); \
448 entry; \
449 entry = xa_find_after(xa, &index, last, XA_PRESENT))
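Usage sketch: visit every present entry whose index lies in [10, 20] (bounds are illustrative):

static void dump_range(struct xarray *xa)
{
    unsigned long index;
    void *entry;

    xa_for_each_range(xa, index, entry, 10, 20)
        pr_info("index %lu -> %p\n", index, entry);
}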
453 * @xa: XArray.
459 * in @xa at @index. You may modify @index during the iteration if you want to skip indices.
474 #define xa_for_each_start(xa, index, entry, start) \ argument
475 xa_for_each_range(xa, index, entry, start, ULONG_MAX)
479 * @xa: XArray.
484 * in @xa at @index. You may modify @index during the iteration if you want to skip indices.
498 #define xa_for_each(xa, index, entry) \ argument
499 xa_for_each_start(xa, index, entry, 0)
503 * @xa: XArray.
509 * in @xa at @index. The iteration will skip all entries in the array which do not match @filter.
525 #define xa_for_each_marked(xa, index, entry, filter) \ argument
526 for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
527 entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
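A sketch pairing the marked iteration with xa_clear_mark(), again treating XA_MARK_0 as a hypothetical "dirty" flag (modifying the array during the iteration is safe):

static void process_dirty(struct xarray *xa)
{
    unsigned long index;
    void *entry;

    xa_for_each_marked(xa, index, entry, XA_MARK_0) {
        /* ... write the entry back ... */
        xa_clear_mark(xa, index, XA_MARK_0);
    }
}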
529 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) argument
530 #define xa_lock(xa) spin_lock(&(xa)->xa_lock) argument
531 #define xa_unlock(xa) spin_unlock(&(xa)->xa_lock) argument
532 #define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock) argument
533 #define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock) argument
534 #define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock) argument
535 #define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock) argument
536 #define xa_lock_irqsave(xa, flags) \ argument
537 spin_lock_irqsave(&(xa)->xa_lock, flags)
538 #define xa_unlock_irqrestore(xa, flags) \ argument
539 spin_unlock_irqrestore(&(xa)->xa_lock, flags)
540 #define xa_lock_nested(xa, subclass) \ argument
541 spin_lock_nested(&(xa)->xa_lock, subclass)
542 #define xa_lock_bh_nested(xa, subclass) \ argument
543 spin_lock_bh_nested(&(xa)->xa_lock, subclass)
544 #define xa_lock_irq_nested(xa, subclass) \ argument
545 spin_lock_irq_nested(&(xa)->xa_lock, subclass)
546 #define xa_lock_irqsave_nested(xa, flags, subclass) \ argument
547 spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
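These macros wrap the array's internal spinlock so several __xa_* calls can share one critical section. Sketch: erase two indices atomically with respect to other users of the array:

static void erase_pair(struct xarray *xa, unsigned long a, unsigned long b)
{
    xa_lock(xa);
    __xa_erase(xa, a);
    __xa_erase(xa, b);
    xa_unlock(xa);
}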
571 * @xa: XArray.
583 static inline void *xa_store_bh(struct xarray *xa, unsigned long index, in xa_store_bh() argument
588 xa_lock_bh(xa); in xa_store_bh()
589 curr = __xa_store(xa, index, entry, gfp); in xa_store_bh()
590 xa_unlock_bh(xa); in xa_store_bh()
597 * @xa: XArray.
609 static inline void *xa_store_irq(struct xarray *xa, unsigned long index, in xa_store_irq() argument
614 xa_lock_irq(xa); in xa_store_irq()
615 curr = __xa_store(xa, index, entry, gfp); in xa_store_irq()
616 xa_unlock_irq(xa); in xa_store_irq()
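The _bh and _irq variants keep a process-context store safe against users of the same array in softirq or hardirq context. A hedged sketch, assuming the array is also touched from a timer callback:

static int publish_entry(struct xarray *xa, unsigned long index, void *p)
{
    return xa_err(xa_store_bh(xa, index, p, GFP_KERNEL));
}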
623 * @xa: XArray.
634 static inline void *xa_erase_bh(struct xarray *xa, unsigned long index) in xa_erase_bh() argument
638 xa_lock_bh(xa); in xa_erase_bh()
639 entry = __xa_erase(xa, index); in xa_erase_bh()
640 xa_unlock_bh(xa); in xa_erase_bh()
647 * @xa: XArray.
658 static inline void *xa_erase_irq(struct xarray *xa, unsigned long index) in xa_erase_irq() argument
662 xa_lock_irq(xa); in xa_erase_irq()
663 entry = __xa_erase(xa, index); in xa_erase_irq()
664 xa_unlock_irq(xa); in xa_erase_irq()
671 * @xa: XArray.
684 static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index, in xa_cmpxchg() argument
689 xa_lock(xa); in xa_cmpxchg()
690 curr = __xa_cmpxchg(xa, index, old, entry, gfp); in xa_cmpxchg()
691 xa_unlock(xa); in xa_cmpxchg()
698 * @xa: XArray.
711 static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, in xa_cmpxchg_bh() argument
716 xa_lock_bh(xa); in xa_cmpxchg_bh()
717 curr = __xa_cmpxchg(xa, index, old, entry, gfp); in xa_cmpxchg_bh()
718 xa_unlock_bh(xa); in xa_cmpxchg_bh()
725 * @xa: XArray.
738 static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, in xa_cmpxchg_irq() argument
743 xa_lock_irq(xa); in xa_cmpxchg_irq()
744 curr = __xa_cmpxchg(xa, index, old, entry, gfp); in xa_cmpxchg_irq()
745 xa_unlock_irq(xa); in xa_cmpxchg_irq()
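As with cmpxchg(), the store happens only if the current entry equals @old, and success is detected by comparing the returned entry with @old. Sketch (helper name hypothetical):

static bool replace_if_unchanged(struct xarray *xa, unsigned long index,
                                 void *old, void *new)
{
    void *curr = xa_cmpxchg(xa, index, old, new, GFP_KERNEL);

    return !xa_is_err(curr) && curr == old;
}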
753 * @xa: XArray.
764 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
767 static inline int __must_check xa_insert(struct xarray *xa, in xa_insert() argument
772 xa_lock(xa); in xa_insert()
773 err = __xa_insert(xa, index, entry, gfp); in xa_insert()
774 xa_unlock(xa); in xa_insert()
782 * @xa: XArray.
793 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
796 static inline int __must_check xa_insert_bh(struct xarray *xa, in xa_insert_bh() argument
801 xa_lock_bh(xa); in xa_insert_bh()
802 err = __xa_insert(xa, index, entry, gfp); in xa_insert_bh()
803 xa_unlock_bh(xa); in xa_insert_bh()
811 * @xa: XArray.
822 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
825 static inline int __must_check xa_insert_irq(struct xarray *xa, in xa_insert_irq() argument
830 xa_lock_irq(xa); in xa_insert_irq()
831 err = __xa_insert(xa, index, entry, gfp); in xa_insert_irq()
832 xa_unlock_irq(xa); in xa_insert_irq()
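Because xa_insert() refuses to overwrite, it is the natural way to claim an index; sketch:

static int claim_slot(struct xarray *xa, unsigned long index, void *p)
{
    int err = xa_insert(xa, index, p, GFP_KERNEL);

    if (err == -EBUSY)
        pr_debug("index %lu already claimed\n", index);
    return err;
}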
839 * @xa: XArray.
845 * Finds an empty entry in @xa between @limit.min and @limit.max,
851 * Return: 0 on success, -ENOMEM if memory could not be allocated or -EBUSY if there are no free entries in @limit.
854 static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, in xa_alloc() argument
859 xa_lock(xa); in xa_alloc()
860 err = __xa_alloc(xa, id, entry, limit, gfp); in xa_alloc()
861 xa_unlock(xa); in xa_alloc()
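A typical lifecycle sketch with the predefined xa_limit_32b range (struct session and the array are hypothetical):

struct session {
    u32 id;
    /* ... */
};

static DEFINE_XARRAY_ALLOC(sessions);

static int session_register(struct session *s)
{
    /* On success, s->id holds the freshly allocated index. */
    return xa_alloc(&sessions, &s->id, s, xa_limit_32b, GFP_KERNEL);
}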
868 * @xa: XArray.
874 * Finds an empty entry in @xa between @limit.min and @limit.max,
880 * Return: 0 on success, -ENOMEM if memory could not be allocated or -EBUSY if there are no free entries in @limit.
883 static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, in xa_alloc_bh() argument
888 xa_lock_bh(xa); in xa_alloc_bh()
889 err = __xa_alloc(xa, id, entry, limit, gfp); in xa_alloc_bh()
890 xa_unlock_bh(xa); in xa_alloc_bh()
897 * @xa: XArray.
903 * Finds an empty entry in @xa between @limit.min and @limit.max,
909 * Return: 0 on success, -ENOMEM if memory could not be allocated or -EBUSY if there are no free entries in @limit.
912 static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, in xa_alloc_irq() argument
917 xa_lock_irq(xa); in xa_alloc_irq()
918 err = __xa_alloc(xa, id, entry, limit, gfp); in xa_alloc_irq()
919 xa_unlock_irq(xa); in xa_alloc_irq()
926 * @xa: XArray.
933 * Finds an empty entry in @xa between @limit.min and @limit.max,
941 * Return: 0 if the allocation succeeded without wrapping. 1 if the allocation succeeded after wrapping.
945 static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, in xa_alloc_cyclic() argument
950 xa_lock(xa); in xa_alloc_cyclic()
951 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); in xa_alloc_cyclic()
952 xa_unlock(xa); in xa_alloc_cyclic()
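The caller owns the @next cursor, which persists across calls so recently freed IDs are not reused immediately; a return of 1 is a successful allocation that wrapped. Sketch:

static u32 next_id;     /* hypothetical cursor, persists across calls */

static int register_cyclic(struct xarray *xa, void *p, u32 *id)
{
    int err = xa_alloc_cyclic(xa, id, p, xa_limit_31b, &next_id,
                              GFP_KERNEL);

    if (err == 1)       /* wrapped, but still succeeded */
        err = 0;
    return err;
}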
959 * @xa: XArray.
966 * Finds an empty entry in @xa between @limit.min and @limit.max,
974 * Return: 0 if the allocation succeeded without wrapping. 1 if the allocation succeeded after wrapping.
978 static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry, in xa_alloc_cyclic_bh() argument
983 xa_lock_bh(xa); in xa_alloc_cyclic_bh()
984 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); in xa_alloc_cyclic_bh()
985 xa_unlock_bh(xa); in xa_alloc_cyclic_bh()
992 * @xa: XArray.
999 * Finds an empty entry in @xa between @limit.min and @limit.max,
1007 * Return: 0 if the allocation succeeded without wrapping. 1 if the allocation succeeded after wrapping.
1011 static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, in xa_alloc_cyclic_irq() argument
1016 xa_lock_irq(xa); in xa_alloc_cyclic_irq()
1017 err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); in xa_alloc_cyclic_irq()
1018 xa_unlock_irq(xa); in xa_alloc_cyclic_irq()
1025 * @xa: XArray.
1039 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1042 int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) in xa_reserve() argument
1044 return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp)); in xa_reserve()
1049 * @xa: XArray.
1057 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1060 int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) in xa_reserve_bh() argument
1062 return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp)); in xa_reserve_bh()
1067 * @xa: XArray.
1075 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1078 int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) in xa_reserve_irq() argument
1080 return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp)); in xa_reserve_irq()
1085 * @xa: XArray.
1092 static inline void xa_release(struct xarray *xa, unsigned long index) in xa_release() argument
1094 xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0); in xa_release()
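The pair supports allocate-early, publish-late: xa_reserve() does the memory allocation up front, and xa_release() hands the slot back if no entry is ever stored. Hedged sketch:

static int setup_then_publish(struct xarray *xa, unsigned long index,
                              void *p)
{
    int err = xa_reserve(xa, index, GFP_KERNEL);

    if (err)
        return err;
    if (!p) {
        xa_release(xa, index);  /* nothing stored; free the slot */
        return -EINVAL;
    }
    /* No allocation needed here: the slot already exists. */
    xa_store(xa, index, p, GFP_KERNEL);
    return 0;
}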
1146 #define XA_BUG_ON(xa, x) do { \ argument
1147 if (x) { \
1148 xa_dump(xa); \
1149 BUG(); \
1150 } \
1151 } while (0)
1157 } while (0)
1159 #define XA_BUG_ON(xa, x) do { } while (0) argument
1160 #define XA_NODE_BUG_ON(node, x) do { } while (0)
1164 static inline void *xa_head(const struct xarray *xa) in xa_head() argument
1166 return rcu_dereference_check(xa->xa_head, in xa_head()
1167 lockdep_is_held(&xa->xa_lock)); in xa_head()
1171 static inline void *xa_head_locked(const struct xarray *xa) in xa_head_locked() argument
1173 return rcu_dereference_protected(xa->xa_head, in xa_head_locked()
1174 lockdep_is_held(&xa->xa_lock)); in xa_head_locked()
1178 static inline void *xa_entry(const struct xarray *xa, in xa_entry() argument
1183 lockdep_is_held(&xa->xa_lock)); in xa_entry()
1187 static inline void *xa_entry_locked(const struct xarray *xa, in xa_entry_locked() argument
1192 lockdep_is_held(&xa->xa_lock)); in xa_entry_locked()
1196 static inline struct xa_node *xa_parent(const struct xarray *xa, in xa_parent() argument
1200 lockdep_is_held(&xa->xa_lock)); in xa_parent()
1204 static inline struct xa_node *xa_parent_locked(const struct xarray *xa, in xa_parent_locked() argument
1208 lockdep_is_held(&xa->xa_lock)); in xa_parent_locked()
1303 * single entry in the array at index 0, there are no allocated xa_nodes to point to, and so we store %NULL in @xa_node.
1311 struct xarray *xa; member
1331 .xa = array, \
1335 .xa_offset = 0, \
1336 .xa_pad = 0, \
1351 struct xa_state name = __XA_STATE(array, index, 0, 0)
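With an xa_state, the canonical store loop drops the lock to allocate memory and retries via xas_nomem(), a pattern described in the kernel's XArray documentation; sketch:

static int store_with_xas(struct xarray *xa, unsigned long index, void *p)
{
    XA_STATE(xas, xa, index);

    do {
        xas_lock(&xas);
        xas_store(&xas, p);
        xas_unlock(&xas);
    } while (xas_nomem(&xas, GFP_KERNEL));

    return xas_error(&xas);
}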
1370 #define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))
1371 #define xas_trylock(xas) xa_trylock((xas)->xa)
1372 #define xas_lock(xas) xa_lock((xas)->xa)
1373 #define xas_unlock(xas) xa_unlock((xas)->xa)
1374 #define xas_lock_bh(xas) xa_lock_bh((xas)->xa)
1375 #define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)
1376 #define xas_lock_irq(xas) xa_lock_irq((xas)->xa)
1377 #define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)
1379 xa_lock_irqsave((xas)->xa, flags)
1381 xa_unlock_irqrestore((xas)->xa, flags)
1387 * Return: 0 if no error has been noted. A negative errno if one has.
1517 static inline int xa_get_order(struct xarray *xa, unsigned long index) in xa_get_order() argument
1519 return 0; in xa_get_order()
1555 return xa_head(xas->xa); in xas_reload()
1558 entry = xa_entry(xas->xa, node, offset); in xas_reload()
1565 return xa_entry(xas->xa, node, offset); in xas_reload()
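Under RCU, a lookup can race with the tree being reshaped, so the entry must be re-checked; the usual pattern retries on internal retry entries via xas_retry(). Sketch:

static void *rcu_lookup(struct xarray *xa, unsigned long index)
{
    XA_STATE(xas, xa, index);
    void *entry;

    rcu_read_lock();
    do {
        entry = xas_load(&xas);
    } while (xas_retry(&xas, entry));
    rcu_read_unlock();

    return entry;
}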
1593 xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0; in xas_set_order()
1598 BUG_ON(order > 0); in xas_set_order()
1641 entry = xa_entry(xas->xa, node, xas->xa_offset + 1); in xas_next_entry()
1662 unsigned long data = *addr & (~0UL << offset); in xas_find_chunk()
1700 entry = xa_entry(xas->xa, node, offset); in xas_next_marked()
1777 * If the iterator was referencing index 0, this function wraps around to %ULONG_MAX.
1788 xas->xa_offset == 0)) in xas_prev()
1793 return xa_entry(xas->xa, node, xas->xa_offset); in xas_prev()
1807 * around to 0.
1822 return xa_entry(xas->xa, node, xas->xa_offset); in xas_next()
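xas_next() and xas_prev() move the cursor a single index per call, which is cheaper than a full xa_load() walk from the root each time. A hedged walk sketch, assuming a plain single-index array (the very first call behaves like xas_load() at the starting index):

static void walk_forward(struct xarray *xa, unsigned long first,
                         unsigned long last)
{
    XA_STATE(xas, xa, first);
    unsigned long i;
    void *entry;

    rcu_read_lock();
    for (i = first; i <= last; i++) {
        entry = xas_next(&xas); /* may be NULL if the index is empty */
        if (entry)
            pr_info("index %lu -> %p\n", i, entry);
    }
    rcu_read_unlock();
}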