Lines Matching +full:permit +full:-
1 /* SPDX-License-Identifier: GPL-2.0+ */
9 * See Documentation/core-api/xarray.rst for how to use the XArray.
34 * 0-62: Sibling entries
39 * space (-4094 to -2). They're never stored in the slots array; only
43 #define BITS_PER_XA_VALUE (BITS_PER_LONG - 1)
46 * xa_mk_value() - Create an XArray entry from an integer.
59 * xa_to_value() - Get value stored in an XArray entry.
71 * xa_is_value() - Determine if an entry is a value.
83 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
101 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
116 * xa_pointer_tag() - Get the tag stored in an XArray entry.
131 * xa_mk_internal() - Create an internal entry.
134 * Internal entries are used for a number of purposes. Entries 0-255 are
135 * used for sibling entries (only 0-62 are used by the current code). 256
149 * xa_to_internal() - Extract the value from an internal entry.
161 * xa_is_internal() - Is the entry an internal entry?
175 * xa_is_zero() - Is the entry a zero entry?
189 * xa_is_err() - Report whether an XArray operation returned an error
202 entry >= xa_mk_internal(-MAX_ERRNO)); in xa_is_err()
206 * xa_err() - Turn an XArray result into an errno.
226 * struct xa_limit - Represents a range of IDs.
233 * * xa_limit_32b - [0 - UINT_MAX]
234 * * xa_limit_31b - [0 - INT_MAX]
235 * * xa_limit_16b - [0 - USHRT_MAX]
274 /* ALLOC is for a normal 0-based alloc. ALLOC1 is for a 1-based alloc */
279 * struct xarray - The anchor of the XArray.
290 * If the only non-NULL entry in the array is at index 0, @xa_head is that
291 * entry. If any other entry in the array is non-NULL, @xa_head points
308 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
321 * DEFINE_XARRAY() - Define an XArray.
332 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
341 * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
366 * xa_init_flags() - Initialise an empty XArray with flags.
378 spin_lock_init(&xa->xa_lock); in xa_init_flags()
379 xa->xa_flags = flags; in xa_init_flags()
380 xa->xa_head = NULL; in xa_init_flags()
384 * xa_init() - Initialise an empty XArray.
397 * xa_empty() - Determine if an array has any present entries.
405 return xa->xa_head == NULL; in xa_empty()
409 * xa_marked() - Inquire whether any entry in this array has a mark set
418 return xa->xa_flags & XA_FLAGS_MARK(mark); in xa_marked()
422 * xa_for_each_range() - Iterate over a portion of an XArray.
452 * xa_for_each_start() - Iterate over a portion of an XArray.
478 * xa_for_each() - Iterate over present entries in an XArray.
502 * xa_for_each_marked() - Iterate over marked entries in an XArray.
529 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
530 #define xa_lock(xa) spin_lock(&(xa)->xa_lock)
531 #define xa_unlock(xa) spin_unlock(&(xa)->xa_lock)
532 #define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock)
533 #define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock)
534 #define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock)
535 #define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock)
537 spin_lock_irqsave(&(xa)->xa_lock, flags)
539 spin_unlock_irqrestore(&(xa)->xa_lock, flags)
541 spin_lock_nested(&(xa)->xa_lock, subclass)
543 spin_lock_bh_nested(&(xa)->xa_lock, subclass)
545 spin_lock_irq_nested(&(xa)->xa_lock, subclass)
547 spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
553 * may also re-enable interrupts if the XArray flags indicate the
570 * xa_store_bh() - Store this entry in the XArray.
596 * xa_store_irq() - Store this entry in the XArray.
622 * xa_erase_bh() - Erase this entry from the XArray.
627 * If the index is part of a multi-index entry, all indices will be erased
628 * and none of the entries will be part of a multi-index entry.
646 * xa_erase_irq() - Erase this entry from the XArray.
651 * If the index is part of a multi-index entry, all indices will be erased
652 * and none of the entries will be part of a multi-index entry.
670 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
681 * if the @gfp flags permit.
697 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
708 * disabling softirqs. May sleep if the @gfp flags permit.
724 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
735 * disabling interrupts. May sleep if the @gfp flags permit.
751 * xa_insert() - Store this entry in the XArray unless another entry is
763 * the @gfp flags permit.
764 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
765 * -ENOMEM if memory could not be allocated.
780 * xa_insert_bh() - Store this entry in the XArray unless another entry is
792 * disabling softirqs. May sleep if the @gfp flags permit.
793 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
794 * -ENOMEM if memory could not be allocated.
809 * xa_insert_irq() - Store this entry in the XArray unless another entry is
821 * disabling interrupts. May sleep if the @gfp flags permit.
822 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
823 * -ENOMEM if memory could not be allocated.
838 * xa_alloc() - Find somewhere to store this entry in the XArray.
850 * the @gfp flags permit.
851 * Return: 0 on success, -ENOMEM if memory could not be allocated or
852 * -EBUSY if there are no free entries in @limit.
867 * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
879 * disabling softirqs. May sleep if the @gfp flags permit.
880 * Return: 0 on success, -ENOMEM if memory could not be allocated or
881 * -EBUSY if there are no free entries in @limit.
896 * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
908 * disabling interrupts. May sleep if the @gfp flags permit.
909 * Return: 0 on success, -ENOMEM if memory could not be allocated or
910 * -EBUSY if there are no free entries in @limit.
925 * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
940 * the @gfp flags permit.
942 * allocation succeeded after wrapping, -ENOMEM if memory could not be
943 * allocated or -EBUSY if there are no free entries in @limit.
958 * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
973 * disabling softirqs. May sleep if the @gfp flags permit.
975 * allocation succeeded after wrapping, -ENOMEM if memory could not be
976 * allocated or -EBUSY if there are no free entries in @limit.
991 * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
1006 * disabling interrupts. May sleep if the @gfp flags permit.
1008 * allocation succeeded after wrapping, -ENOMEM if memory could not be
1009 * allocated or -EBUSY if there are no free entries in @limit.
1024 * xa_reserve() - Reserve this index in the XArray.
1038 * May sleep if the @gfp flags permit.
1039 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1048 * xa_reserve_bh() - Reserve this index in the XArray.
1053 * A softirq-disabling version of xa_reserve().
1057 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1066 * xa_reserve_irq() - Reserve this index in the XArray.
1071 * An interrupt-disabling version of xa_reserve().
1075 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1084 * xa_release() - Release a reserved entry.
1105 * balanced against the memory consumption of each node. On a 64-bit system,
1113 #define XA_CHUNK_MASK (XA_CHUNK_SIZE - 1)
1118 * @count is the count of every non-NULL element in the ->slots array
1121 * @nr_values is the count of every element in ->slots which is
1166 return rcu_dereference_check(xa->xa_head, in xa_head()
1167 lockdep_is_held(&xa->xa_lock)); in xa_head()
1173 return rcu_dereference_protected(xa->xa_head, in xa_head_locked()
1174 lockdep_is_held(&xa->xa_lock)); in xa_head_locked()
1182 return rcu_dereference_check(node->slots[offset], in xa_entry()
1183 lockdep_is_held(&xa->xa_lock)); in xa_entry()
1191 return rcu_dereference_protected(node->slots[offset], in xa_entry_locked()
1192 lockdep_is_held(&xa->xa_lock)); in xa_entry_locked()
1199 return rcu_dereference_check(node->parent, in xa_parent()
1200 lockdep_is_held(&xa->xa_lock)); in xa_parent()
1207 return rcu_dereference_protected(node->parent, in xa_parent_locked()
1208 lockdep_is_held(&xa->xa_lock)); in xa_parent_locked()
1220 return (struct xa_node *)((unsigned long)entry - 2); in xa_to_node()
1242 * xa_is_sibling() - Is the entry a sibling entry?
1250 (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); in xa_is_sibling()
1256 * xa_is_retry() - Is the entry a retry entry?
1267 * xa_is_advanced() - Is the entry only permitted for the advanced API?
1278 * typedef xa_update_node_t - A callback function from the XArray.
1286 * Implementations should not drop the xa_lock, nor re-enable
1323 * We encode errnos in the xas->xa_node. If an error has happened, we need to
1343 * XA_STATE() - Declare an XArray operation state.
1354 * XA_STATE_ORDER() - Declare an XArray operation state.
1367 order - (order % XA_CHUNK_SHIFT), \
1368 (1U << (order % XA_CHUNK_SHIFT)) - 1)
1370 #define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))
1371 #define xas_trylock(xas) xa_trylock((xas)->xa)
1372 #define xas_lock(xas) xa_lock((xas)->xa)
1373 #define xas_unlock(xas) xa_unlock((xas)->xa)
1374 #define xas_lock_bh(xas) xa_lock_bh((xas)->xa)
1375 #define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)
1376 #define xas_lock_irq(xas) xa_lock_irq((xas)->xa)
1377 #define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)
1379 xa_lock_irqsave((xas)->xa, flags)
1381 xa_unlock_irqrestore((xas)->xa, flags)
1384 * xas_error() - Return an errno stored in the xa_state.
1391 return xa_err(xas->xa_node); in xas_error()
1395 * xas_set_err() - Note an error in the xa_state.
1405 xas->xa_node = XA_ERROR(err); in xas_set_err()
1409 * xas_invalid() - Is the xas in a retry or error state?
1416 return (unsigned long)xas->xa_node & 3; in xas_invalid()
1420 * xas_valid() - Is the xas a valid cursor into the array?
1431 * xas_is_node() - Does the xas point to a node?
1438 return xas_valid(xas) && xas->xa_node; in xas_is_node()
1453 /* True if the node represents head-of-tree, RESTART or BOUNDS */
1460 * xas_reset() - Reset an XArray operation state.
1471 xas->xa_node = XAS_RESTART; in xas_reset()
1475 * xas_retry() - Retry the operation if appropriate.
1535 * xas_reload() - Refetch an entry from the xarray.
1550 struct xa_node *node = xas->xa_node; in xas_reload()
1555 return xa_head(xas->xa); in xas_reload()
1557 offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK; in xas_reload()
1558 entry = xa_entry(xas->xa, node, offset); in xas_reload()
1563 offset = xas->xa_offset; in xas_reload()
1565 return xa_entry(xas->xa, node, offset); in xas_reload()
1569 * xas_set() - Set up XArray operation state for a different index.
1579 xas->xa_index = index; in xas_set()
1580 xas->xa_node = XAS_RESTART; in xas_set()
1584 * xas_set_order() - Set up XArray operation state for a multislot entry.
1593 xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0; in xas_set_order()
1594 xas->xa_shift = order - (order % XA_CHUNK_SHIFT); in xas_set_order()
1595 xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; in xas_set_order()
1596 xas->xa_node = XAS_RESTART; in xas_set_order()
1604 * xas_set_update() - Set up XArray operation state for a callback.
1613 xas->xa_update = update; in xas_set_update()
1617 * xas_next_entry() - Advance iterator to next present entry.
1629 struct xa_node *node = xas->xa_node; in xas_next_entry()
1632 if (unlikely(xas_not_node(node) || node->shift || in xas_next_entry()
1633 xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK))) in xas_next_entry()
1637 if (unlikely(xas->xa_index >= max)) in xas_next_entry()
1639 if (unlikely(xas->xa_offset == XA_CHUNK_MASK)) in xas_next_entry()
1641 entry = xa_entry(xas->xa, node, xas->xa_offset + 1); in xas_next_entry()
1644 xas->xa_offset++; in xas_next_entry()
1645 xas->xa_index++; in xas_next_entry()
1655 unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark]; in xas_find_chunk()
1656 unsigned int offset = xas->xa_offset; in xas_find_chunk()
1673 * xas_next_marked() - Advance iterator to next marked entry.
1687 struct xa_node *node = xas->xa_node; in xas_next_marked()
1691 if (unlikely(xas_not_node(node) || node->shift)) in xas_next_marked()
1694 xas->xa_offset = offset; in xas_next_marked()
1695 xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset; in xas_next_marked()
1696 if (xas->xa_index > max) in xas_next_marked()
1700 entry = xa_entry(xas->xa, node, offset); in xas_next_marked()
1715 * xas_for_each() - Iterate over a range of an XArray.
1732 * xas_for_each_marked() - Iterate over a range of an XArray.
1750 * xas_for_each_conflict() - Iterate over a range of an XArray.
1768 * xas_prev() - Move iterator to previous index.
1785 struct xa_node *node = xas->xa_node; in xas_prev()
1787 if (unlikely(xas_not_node(node) || node->shift || in xas_prev()
1788 xas->xa_offset == 0)) in xas_prev()
1791 xas->xa_index--; in xas_prev()
1792 xas->xa_offset--; in xas_prev()
1793 return xa_entry(xas->xa, node, xas->xa_offset); in xas_prev()
1797 * xas_next() - Move state to next index.
1814 struct xa_node *node = xas->xa_node; in xas_next()
1816 if (unlikely(xas_not_node(node) || node->shift || in xas_next()
1817 xas->xa_offset == XA_CHUNK_MASK)) in xas_next()
1820 xas->xa_index++; in xas_next()
1821 xas->xa_offset++; in xas_next()
1822 return xa_entry(xas->xa, node, xas->xa_offset); in xas_next()