/Linux-v5.15/Documentation/i2c/ |
D | i2c-topology.rst |
    38  mux-locked or parent-locked muxes. As is evident from below, it can be
    39  useful to know if a mux is mux-locked or if it is parent-locked. The
    45  i2c-arb-gpio-challenge    Parent-locked
    46  i2c-mux-gpio              Normally parent-locked, mux-locked iff
    49  i2c-mux-gpmux             Normally parent-locked, mux-locked iff
    51  i2c-mux-ltc4306           Mux-locked
    52  i2c-mux-mlxcpld           Parent-locked
    53  i2c-mux-pca9541           Parent-locked
    54  i2c-mux-pca954x           Parent-locked
    55  i2c-mux-pinctrl           Normally parent-locked, mux-locked iff
    [all …]
|
/Linux-v5.15/kernel/locking/ |
D | qspinlock_paravirt.h |
    91  (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {  in pv_hybrid_queued_unfair_trylock()
   121  return !READ_ONCE(lock->locked) &&  in trylock_clear_pending()
   302  if (READ_ONCE(node->locked))  in pv_wait_node()
   322  if (!READ_ONCE(node->locked)) {  in pv_wait_node()
   343  !READ_ONCE(node->locked));  in pv_wait_node()
   391  WRITE_ONCE(lock->locked, _Q_SLOW_VAL);  in pv_kick_node()
   456  if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {  in pv_wait_head_or_lock()
   462  WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);  in pv_wait_head_or_lock()
   470  pv_wait(&lock->locked, _Q_SLOW_VAL);  in pv_wait_head_or_lock()
   493  __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)  in __pv_queued_spin_unlock_slowpath() argument
   [all …]
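The paravirt qspinlock hits above revolve around a single byte-wide "locked" field: the unfair trylock claims it with cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL), and the slow paths rewrite it to _Q_SLOW_VAL before sleeping in pv_wait(). A minimal userspace sketch of that byte-lock claim/release, using C11 atomics and illustrative names (byte_lock, Q_LOCKED_VAL), not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define Q_LOCKED_VAL 1

struct byte_lock {
	_Atomic uint8_t locked;
};

static bool byte_trylock(struct byte_lock *lock)
{
	uint8_t expected = 0;

	/* Mirrors cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0. */
	return atomic_compare_exchange_strong_explicit(&lock->locked, &expected,
						       Q_LOCKED_VAL,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void byte_unlock(struct byte_lock *lock)
{
	/* Release by clearing the locked byte. */
	atomic_store_explicit(&lock->locked, 0, memory_order_release);
}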
|
D | mcs_spinlock.h |
    20  int locked; /* 1 if lock acquired */  member
    70  node->locked = 0;  in mcs_spin_lock()
    94  arch_mcs_spin_lock_contended(&node->locked);  in mcs_spin_lock()
   118  arch_mcs_spin_unlock_contended(&next->locked);  in mcs_spin_unlock()
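The mcs_spinlock.h matches show the whole MCS handoff in three lines: a waiter clears its own node->locked, spins on it via arch_mcs_spin_lock_contended(), and the unlocker passes ownership by setting next->locked. Below is a minimal userspace analogue with C11 atomics; mcs_node, mcs_lock and the function names are illustrative, not the kernel implementation.

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;
	atomic_int locked;		/* 1 once the lock has been handed to us */
};

struct mcs_lock {
	struct mcs_node *_Atomic tail;
};

static void mcs_lock_acquire(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store(&node->next, NULL);
	atomic_store(&node->locked, 0);

	/* Queue ourselves at the tail. */
	prev = atomic_exchange(&lock->tail, node);
	if (!prev)
		return;			/* uncontended: lock acquired */

	/* Publish ourselves to the predecessor, then spin on OUR flag only. */
	atomic_store(&prev->next, node);
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;			/* cpu_relax() in the kernel */
}

static void mcs_lock_release(struct mcs_lock *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load(&node->next);

	if (!next) {
		/* No visible successor: try to reset the tail to empty. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong(&lock->tail, &expected, NULL))
			return;
		/* A successor is enqueueing; wait for it to appear. */
		while (!(next = atomic_load(&node->next)))
			;
	}
	/* Hand the lock off: the successor's spin on ->locked ends here. */
	atomic_store_explicit(&next->locked, 1, memory_order_release);
}

The osq_lock.c and qspinlock.c entries below reuse the same idea: a per-CPU queue node whose ->locked field is written only by the previous lock holder.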
|
D | osq_lock.c |
    97  node->locked = 0;  in osq_lock()
   143  if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||  in osq_lock()
   170  if (smp_load_acquire(&node->locked))  in osq_lock()
   225  WRITE_ONCE(next->locked, 1);  in osq_unlock()
   231  WRITE_ONCE(next->locked, 1);  in osq_unlock()
|
D | qspinlock.c |
   263  WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);  in set_locked()
   434  node->locked = 0;  in queued_spin_lock_slowpath()
   474  arch_mcs_spin_lock_contended(&node->locked);  in queued_spin_lock_slowpath()
   509  goto locked;  in queued_spin_lock_slowpath()
   513  locked:  in queued_spin_lock_slowpath()
   553  arch_mcs_spin_unlock_contended(&next->locked);  in queued_spin_lock_slowpath()
|
/Linux-v5.15/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/ |
D | locks.h |
    58  bool locked;  member
    65  return __sync_bool_compare_and_swap(&lock->locked, false, true);  in lock_impl_trylock()
    68  bool old_locked = lock->locked;  in lock_impl_trylock()
    69  lock->locked = true;  in lock_impl_trylock()
    96  BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));  in lock_impl_unlock()
   102  bool old_locked = lock->locked;  in lock_impl_unlock()
   103  lock->locked = false;  in lock_impl_unlock()
   112  lock->locked = false;  in lock_impl_init()
   115  #define LOCK_IMPL_INITIALIZER {.locked = false}
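The srcu-cbmc lock model in locks.h is just a bool flipped with GCC's __sync builtins: trylock succeeds only if it is the CAS that turns false into true, and unlock traps if the lock was not actually held. A compact sketch of that pattern, with illustrative names and __builtin_trap() standing in for BUG_ON():

#include <stdbool.h>

struct lock_impl {
	bool locked;
};

static bool lock_impl_trylock(struct lock_impl *lock)
{
	/* Succeed only if we are the ones who flip false -> true. */
	return __sync_bool_compare_and_swap(&lock->locked, false, true);
}

static void lock_impl_unlock(struct lock_impl *lock)
{
	/* Unlocking a lock we do not hold is a bug. */
	if (!__sync_bool_compare_and_swap(&lock->locked, true, false))
		__builtin_trap();
}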
|
/Linux-v5.15/scripts/coccinelle/locks/ |
D | double_lock.cocci |
    17  @locked@
    42  position p1 != locked.p1;
    43  position locked.p;
    45  expression x <= locked.E1;
    46  expression E,locked.E1;
    65  expression x <= locked.E1;
    66  expression locked.E1;
    69  position locked.p,p1,p2;
|
D | call_kern.cocci |
    39  @locked exists@
    74  @depends on locked && patch@
    81  @depends on locked && !patch@
    90  p1 << locked.p1;
    91  p2 << locked.p2;
   101  p1 << locked.p1;
   102  p2 << locked.p2;
|
/Linux-v5.15/drivers/media/dvb-frontends/ |
D | lgs8gxx.c |
   295  static int lgs8gxx_is_locked(struct lgs8gxx_state *priv, u8 *locked)  in lgs8gxx_is_locked() argument
   308  *locked = ((t & 0x80) == 0x80) ? 1 : 0;  in lgs8gxx_is_locked()
   310  *locked = ((t & 0xC0) == 0xC0) ? 1 : 0;  in lgs8gxx_is_locked()
   315  static int lgs8gxx_wait_ca_lock(struct lgs8gxx_state *priv, u8 *locked)  in lgs8gxx_wait_ca_lock() argument
   331  *locked = (ret == 0) ? 1 : 0;  in lgs8gxx_wait_ca_lock()
   359  u8 *locked)  in lgs8gxx_autolock_gi() argument
   384  err = lgs8gxx_wait_ca_lock(priv, locked);  in lgs8gxx_autolock_gi()
   385  if (err || !(*locked))  in lgs8gxx_autolock_gi()
   393  *locked = 0;  in lgs8gxx_autolock_gi()
   403  u8 locked = 0, tmp_gi;  in lgs8gxx_auto_detect() local
   [all …]
|
D | atbm8830.c |
   147  static int is_locked(struct atbm_state *priv, u8 *locked)  in is_locked() argument
   153  if (locked != NULL)  in is_locked()
   154  *locked = (status == 1);  in is_locked()
   261  u8 locked = 0;  in atbm8830_set_fe() local
   277  is_locked(priv, &locked);  in atbm8830_set_fe()
   278  if (locked != 0) {  in atbm8830_set_fe()
   329  u8 locked = 0;  in atbm8830_read_status() local
   335  is_locked(priv, &locked);  in atbm8830_read_status()
   336  if (locked) {  in atbm8830_read_status()
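Both DVB frontend drivers above follow the same convention: an is_locked() helper reads a demodulator status register and reports carrier lock through a u8 * out-parameter, which callers then poll before tuning proceeds. A trimmed sketch of that shape; the register value is passed in directly here and the lock-bit mask is hypothetical, not taken from either datasheet:

#include <stdint.h>
#include <stddef.h>

#define STATUS_REG_LOCK_MASK 0x80	/* hypothetical "carrier locked" bit */

static int demod_is_locked(uint8_t status_reg, uint8_t *locked)
{
	/* Report lock state only if the caller asked for it. */
	if (locked != NULL)
		*locked = (status_reg & STATUS_REG_LOCK_MASK) ? 1 : 0;
	return 0;
}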
|
/Linux-v5.15/mm/ |
D | gup.c |
   913  unsigned long address, unsigned int *flags, int *locked)  in faultin_page() argument
   925  if (locked)  in faultin_page()
   947  if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))  in faultin_page()
   948  *locked = 0;  in faultin_page()
  1082  struct vm_area_struct **vmas, int *locked)  in __get_user_pages() argument
  1132  gup_flags, locked);  in __get_user_pages()
  1133  if (locked && *locked == 0) {  in __get_user_pages()
  1158  ret = faultin_page(vma, start, &foll_flags, locked);  in __get_user_pages()
  1312  int *locked,  in __get_user_pages_locked() argument
  1318  if (locked) {  in __get_user_pages_locked()
  [all …]
|
D | mlock.c |
   649  unsigned long locked;  in do_mlock() local
   663  locked = len >> PAGE_SHIFT;  in do_mlock()
   668  locked += current->mm->locked_vm;  in do_mlock()
   669  if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {  in do_mlock()
   676  locked -= count_mm_mlocked_page_nr(current->mm,  in do_mlock()
   681  if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))  in do_mlock()
   823  unsigned long lock_limit, locked;  in user_shm_lock() local
   827  locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;  in user_shm_lock()
   833  memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);  in user_shm_lock()
   836  dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);  in user_shm_lock()
   [all …]
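The do_mlock() lines above spell out the RLIMIT_MEMLOCK accounting: convert the request to pages, add the pages the mm already has locked, and refuse if the total exceeds the limit unless the caller has CAP_IPC_LOCK. A simplified sketch of that check; it ignores the count_mm_mlocked_page_nr() adjustment for ranges that are already mlocked, and the names are illustrative:

#include <stdbool.h>

#define PAGE_SHIFT 12

static bool mlock_within_limit(unsigned long len, unsigned long already_locked_vm,
			       unsigned long lock_limit_pages, bool cap_ipc_lock)
{
	unsigned long locked = len >> PAGE_SHIFT;	/* pages requested */

	locked += already_locked_vm;			/* pages the mm already holds */
	return locked <= lock_limit_pages || cap_ipc_lock;
}

For example, a 64 KiB request is 16 pages; with 10 pages already locked and a 16-page limit, 16 + 10 > 16, so the call fails unless the caller has CAP_IPC_LOCK.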
|
D | mremap.c |
   565  bool *locked, unsigned long flags,  in move_vma() argument
   695  *locked = true;  in move_vma()
   759  unsigned long locked, lock_limit;  in vma_to_resize() local
   760  locked = mm->locked_vm << PAGE_SHIFT;  in vma_to_resize()
   762  locked += new_len - old_len;  in vma_to_resize()
   763  if (locked > lock_limit && !capable(CAP_IPC_LOCK))  in vma_to_resize()
   782  unsigned long new_addr, unsigned long new_len, bool *locked,  in mremap_to() argument
   862  ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,  in mremap_to()
   903  bool locked = false;  in SYSCALL_DEFINE5() local
   955  &locked, flags, &uf, &uf_unmap_early,  in SYSCALL_DEFINE5()
   [all …]
|
D | compaction.c |
   528  unsigned long flags, bool *locked, struct compact_control *cc)  in compact_unlock_should_abort() argument
   530  if (*locked) {  in compact_unlock_should_abort()
   532  *locked = false;  in compact_unlock_should_abort()
   560  bool locked = false;  in isolate_freepages_block() local
   582  &locked, cc))  in isolate_freepages_block()
   613  if (!locked) {  in isolate_freepages_block()
   614  locked = compact_lock_irqsave(&cc->zone->lock,  in isolate_freepages_block()
   650  if (locked)  in isolate_freepages_block()
   801  struct lruvec *locked = NULL;  in isolate_migratepages_block() local
   869  if (locked) {  in isolate_migratepages_block()
   [all …]
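compact_unlock_should_abort() captures a common scanning idiom: the caller tracks opportunistic lock ownership in a bool *locked, and when the scan needs to back off the helper releases the lock (if held), clears the flag, and tells the caller whether to abort. A userspace sketch with a pthread spinlock standing in for the zone/LRU locks; should_abort_scan() is a hypothetical stand-in for the kernel's fatal-signal and need_resched() checks:

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical policy hook; the kernel checks fatal signals and need_resched(). */
static bool should_abort_scan(void)
{
	return false;
}

static bool scan_unlock_should_abort(pthread_spinlock_t *lock, bool *locked)
{
	if (*locked) {
		pthread_spin_unlock(lock);	/* drop the lock we were holding */
		*locked = false;		/* remember we no longer hold it */
	}
	return should_abort_scan();
}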
|
/Linux-v5.15/sound/core/seq/ |
D | seq_queue.h |
    29  bool locked; /* timer is only accesibble by owner if set */  member
    57  struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
    86  int snd_seq_queue_set_owner(int queueid, int client, int locked);
    87  int snd_seq_queue_set_locked(int queueid, int client, int locked);
|
D | seq_queue.c |
    96  static struct snd_seq_queue *queue_new(int owner, int locked)  in queue_new() argument
   122  q->locked = locked;  in queue_new()
   168  struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)  in snd_seq_queue_alloc() argument
   172  q = queue_new(client, locked);  in snd_seq_queue_alloc()
   346  return (q->owner == client) || (!q->locked && !q->klocked);  in check_access()
   396  int snd_seq_queue_set_owner(int queueid, int client, int locked)  in snd_seq_queue_set_owner() argument
   410  q->locked = locked ? 1 : 0;  in snd_seq_queue_set_owner()
   729  bool locked;  in snd_seq_info_queues_read() local
   744  locked = q->locked;  in snd_seq_info_queues_read()
   750  snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free");  in snd_seq_info_queues_read()
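The check_access() hit above states the sequencer queue access rule in one line: the owning client can always use the queue, and other clients only when it is neither user-locked nor kernel-locked. A trimmed sketch of that rule; the struct below mirrors the field names in the hits but is cut down for illustration:

#include <stdbool.h>

struct seq_queue_view {
	int owner;	/* client that owns the queue */
	bool locked;	/* timer only accessible by the owner if set */
	bool klocked;	/* kernel-side lock */
};

static bool queue_access_ok(const struct seq_queue_view *q, int client)
{
	return q->owner == client || (!q->locked && !q->klocked);
}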
|
/Linux-v5.15/drivers/thermal/intel/ |
D | intel_quark_dts_thermal.c |
   103  bool locked;  member
   132  if (!aux_entry->locked) {  in soc_dts_enable()
   160  if (!aux_entry->locked) {  in soc_dts_disable()
   220  if (aux_entry->locked) {  in update_trip_temp()
   328  if (!aux_entry->locked) {  in free_soc_dts()
   364  aux_entry->locked = true;  in alloc_soc_dts()
   367  aux_entry->locked = false;  in alloc_soc_dts()
   372  if (!aux_entry->locked) {  in alloc_soc_dts()
|
/Linux-v5.15/drivers/mtd/spi-nor/ |
D | otp.c |
   253  int ret, locked;  in spi_nor_mtd_otp_info() local
   266  locked = ops->is_locked(nor, i);  in spi_nor_mtd_otp_info()
   267  if (locked < 0) {  in spi_nor_mtd_otp_info()
   268  ret = locked;  in spi_nor_mtd_otp_info()
   272  buf->locked = !!locked;  in spi_nor_mtd_otp_info()
   289  int locked;  in spi_nor_mtd_otp_range_is_locked() local
   298  locked = ops->is_locked(nor, region);  in spi_nor_mtd_otp_range_is_locked()
   300  if (locked)  in spi_nor_mtd_otp_range_is_locked()
   301  return locked;  in spi_nor_mtd_otp_range_is_locked()
|
/Linux-v5.15/drivers/net/ethernet/intel/ice/ |
D | ice_dcb_lib.h |
    24  ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
    28  int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
    83  ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)  in ice_init_pf_dcb() argument
    92  bool __always_unused locked)  in ice_pf_dcb_cfg() argument
|
/Linux-v5.15/drivers/vme/ |
D | vme_bridge.h |
    19  int locked;  member
    32  int locked;  member
    65  int locked;  member
    76  int locked;  member
|
/Linux-v5.15/drivers/gpu/drm/ttm/ |
D | ttm_bo.c |
   602  bool *locked, bool *busy)  in ttm_bo_evict_swapout_allowable() argument
   610  *locked = false;  in ttm_bo_evict_swapout_allowable()
   615  *locked = ret;  in ttm_bo_evict_swapout_allowable()
   622  if (*locked) {  in ttm_bo_evict_swapout_allowable()
   624  *locked = false;  in ttm_bo_evict_swapout_allowable()
   673  bool locked = false;  in ttm_mem_evict_first() local
   683  &locked, &busy)) {  in ttm_mem_evict_first()
   691  if (locked)  in ttm_mem_evict_first()
   717  ctx->no_wait_gpu, locked);  in ttm_mem_evict_first()
   725  if (locked)  in ttm_mem_evict_first()
   [all …]
|
/Linux-v5.15/Documentation/ABI/testing/ |
D | sysfs-platform-intel-pmc |
    14  * bit 31 - global reset is locked
    18  in case the register is not locked.
    19  The "global reset bit" should be locked on a production
|
/Linux-v5.15/drivers/tty/ |
D | tty_ldsem.c |
   234  int locked = 0;  in down_write_failed() local
   265  locked = writer_trylock(sem);  in down_write_failed()
   266  if (locked)  in down_write_failed()
   270  if (!locked)  in down_write_failed()
   280  if (!locked && list_empty(&sem->write_wait))  in down_write_failed()
   288  if (!locked)  in down_write_failed()
|
/Linux-v5.15/drivers/infiniband/hw/qib/ |
D | qib_user_pages.c |
    97  unsigned long locked, lock_limit;  in qib_get_user_pages() local
   102  locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);  in qib_get_user_pages()
   104  if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {  in qib_get_user_pages()
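qib_get_user_pages() does its pinned-memory accounting up front: it atomically adds the new page count to mm->pinned_vm and rejects the request if the resulting total exceeds the RLIMIT_MEMLOCK-derived limit and the caller lacks CAP_IPC_LOCK. A sketch of that check with a plain C11 atomic counter; the failure-path rollback of the counter is omitted and the names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long pinned_vm;	/* stands in for current->mm->pinned_vm */

static bool may_pin_pages(unsigned long num_pages, unsigned long lock_limit,
			  bool cap_ipc_lock)
{
	/* fetch_add returns the old value; adding num_pages mirrors
	 * atomic64_add_return(), which returns the new total. */
	unsigned long locked = atomic_fetch_add(&pinned_vm, num_pages) + num_pages;

	return locked <= lock_limit || cap_ipc_lock;
}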
|
/Linux-v5.15/tools/testing/selftests/vm/ |
D | mlock2-tests.c |
   148  bool locked;  in is_vma_lock_on_fault() local
   151  locked = is_vmflag_set(addr, LOCKED);  in is_vma_lock_on_fault()
   152  if (!locked)  in is_vma_lock_on_fault()
   168  bool locked;  in lock_check() local
   171  locked = is_vmflag_set(addr, LOCKED);  in lock_check()
   172  if (!locked)  in lock_check()
|