/Zephyr-4.3.0/arch/riscv/core/

fpu.c
     38  buf[3] = '0' + _current_cpu->id;  in DBG()
     73  _current_cpu->arch.fpu_state = (status & MSTATUS_FS);  in z_riscv_fpu_disable()
     85  atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);  in z_riscv_fpu_load()
    108  struct k_thread *owner = atomic_ptr_get(&_current_cpu->arch.fpu_owner);  in arch_flush_local_fpu()
    111  bool dirty = (_current_cpu->arch.fpu_state == MSTATUS_FS_DIRTY);  in arch_flush_local_fpu()
    127  atomic_ptr_clear(&_current_cpu->arch.fpu_owner);  in arch_flush_local_fpu()
    150  if (i == _current_cpu->id) {  in flush_owned_fpu()
    261  if (_current_cpu->arch.fpu_owner == _current) {  in fpu_access_allowed()
    279  _current_cpu->arch.fpu_state = MSTATUS_FS_CLEAN;  in fpu_access_allowed()
    300  esf->mstatus |= _current_cpu->arch.fpu_state;  in z_riscv_fpu_exit_exc()
    [all …]
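Note: the fpu.c hits above trace Zephyr's lazy FPU context switching: each CPU records the thread whose registers are live in a per-CPU fpu_owner pointer, updated with atomic pointer operations. Below is a minimal standalone sketch of that ownership protocol, using C11 atomics in place of Zephyr's atomic_ptr_* API; save_fpu_context(), restore_fpu_context() and fpu_is_dirty() are hypothetical stubs, not Zephyr functions.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct thread {
        char name[8];
        unsigned long long fpu_regs[32]; /* saved FPU context */
    };

    /* Per-CPU record of which thread's registers are live in the FPU. */
    static _Atomic(struct thread *) fpu_owner;

    /* Hypothetical hardware accessors, stubbed for this sketch. */
    static void save_fpu_context(struct thread *t)    { (void)t; }
    static void restore_fpu_context(struct thread *t) { (void)t; }
    static bool fpu_is_dirty(void)                    { return true; }

    /* Modeled on arch_flush_local_fpu(): if a thread owns the FPU and
     * the registers are dirty, write them back, then drop ownership.
     */
    static void flush_local_fpu(void)
    {
        struct thread *owner = atomic_load(&fpu_owner);

        if (owner != NULL) {
            if (fpu_is_dirty()) {
                save_fpu_context(owner);
            }
            atomic_store(&fpu_owner, NULL);
        }
    }

    /* Modeled on z_riscv_fpu_load(): make `t` the owner on an FPU trap. */
    static void fpu_load(struct thread *t)
    {
        restore_fpu_context(t);
        atomic_store(&fpu_owner, t);
    }

    int main(void)
    {
        struct thread a = { .name = "A" };

        fpu_load(&a);
        printf("owner: %s\n", atomic_load(&fpu_owner)->name);
        flush_local_fpu();
        printf("owner after flush: %p\n", (void *)atomic_load(&fpu_owner));
        return 0;
    }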
|
ipi_clint.c
     25  unsigned int id = _current_cpu->id;  in arch_sched_directed_ipi()
     52  atomic_val_t pending_ipi = atomic_clear(&cpu_pending_ipi[_current_cpu->id]);  in sched_ipi_handler()
     81  atomic_val_t *pending_ipi = &cpu_pending_ipi[_current_cpu->id];  in arch_spin_relax()
     88  arch_float_disable(_current_cpu->arch.fpu_owner);  in arch_spin_relax()
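Note: the atomic_clear() call in sched_ipi_handler() reads and zeroes the per-CPU pending bitmap in one step, so a reason bit posted between the read and the clear cannot be lost. A standalone sketch of that consume-all pattern, with C11 atomic_exchange standing in for Zephyr's atomic_clear and made-up reason bits IPI_SCHED/IPI_FPU:

    #include <stdatomic.h>
    #include <stdio.h>

    #define IPI_SCHED  (1u << 0)
    #define IPI_FPU    (1u << 1)

    static atomic_uint cpu_pending_ipi[4]; /* pending reasons, per CPU */

    /* Sender side: post a reason bit; real code then raises the IPI. */
    static void post_ipi(unsigned int cpu, unsigned int reason)
    {
        atomic_fetch_or(&cpu_pending_ipi[cpu], reason);
    }

    /* Receiver side, modeled on sched_ipi_handler(): consume every
     * pending reason atomically, then act on each bit.
     */
    static void ipi_handler(unsigned int cpu)
    {
        unsigned int pending = atomic_exchange(&cpu_pending_ipi[cpu], 0);

        if (pending & IPI_SCHED) {
            printf("cpu%u: reschedule request\n", cpu);
        }
        if (pending & IPI_FPU) {
            printf("cpu%u: flush FPU request\n", cpu);
        }
    }

    int main(void)
    {
        post_ipi(1, IPI_SCHED);
        post_ipi(1, IPI_FPU);
        ipi_handler(1);
        return 0;
    }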
|
/Zephyr-4.3.0/kernel/

spinlock_validate.c
     15  if ((thread_cpu & 3U) == _current_cpu->id) {  in z_spin_lock_valid()
     33  if (tcpu != (_current_cpu->id | (uintptr_t)_current)) {  in z_spin_unlock_valid()
     42  l->thread_cpu = _current_cpu->id | (uintptr_t)_current;  in z_spin_lock_set_owner()
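Note: spinlock_validate.c packs the owning CPU id and the owning thread pointer into the single thread_cpu word (`_current_cpu->id | (uintptr_t)_current`), relying on thread structures being aligned so the low pointer bits are free; `thread_cpu & 3U` then recovers the CPU id. A sketch of that encoding under the assumption of at least 4-byte alignment (two low bits, so at most 4 CPUs representable); owner_encode()/owner_cpu()/owner_thread() are illustrative names, not Zephyr's:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct thread {
        int prio;
    } __attribute__((aligned(4))); /* low two pointer bits always zero */

    /* Pack the CPU id into the low bits of the thread pointer, as
     * z_spin_lock_set_owner() does with `cpu->id | (uintptr_t)_current`.
     */
    static uintptr_t owner_encode(unsigned int cpu_id, struct thread *t)
    {
        assert(cpu_id < 4);                   /* must fit in 2 low bits */
        assert(((uintptr_t)t & 3U) == 0);     /* alignment frees them */
        return (uintptr_t)t | cpu_id;
    }

    static unsigned int owner_cpu(uintptr_t thread_cpu)
    {
        return (unsigned int)(thread_cpu & 3U); /* as z_spin_lock_valid() */
    }

    static struct thread *owner_thread(uintptr_t thread_cpu)
    {
        return (struct thread *)(thread_cpu & ~(uintptr_t)3U);
    }

    int main(void)
    {
        struct thread t;
        uintptr_t enc = owner_encode(2, &t);

        printf("cpu=%u thread=%p\n",
               owner_cpu(enc), (void *)owner_thread(enc));
        return 0;
    }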
|
usage.c
     81  _current_cpu->usage0 = usage_now(); /* Always update */  in z_sched_usage_start()
     95  _current_cpu->usage0 = usage_now();  in z_sched_usage_start()
    103  struct _cpu *cpu = _current_cpu;  in z_sched_usage_stop()
    130  if (cpu == _current_cpu) {  in z_sched_cpu_usage()
    179  cpu = _current_cpu;  in z_sched_thread_usage()
    257  struct _cpu *cpu = _current_cpu;  in k_thread_runtime_stats_disable()
    283  if (_current_cpu->usage->track_usage) {  in k_sys_runtime_stats_enable()
    317  if (!_current_cpu->usage->track_usage) {  in k_sys_runtime_stats_disable()
    385  if (thread != _current_cpu->current) {  in z_thread_stats_reset()
    403  uint32_t cycles = now - _current_cpu->usage0;  in z_thread_stats_reset()
    [all …]
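Note: usage.c's accounting hinges on the per-CPU usage0 field: it stamps the cycle counter when the current thread starts being charged, and `now - _current_cpu->usage0` later yields the cycles to attribute. A minimal sketch of that start/stop pair, assuming a fake usage_now() cycle counter in place of the real timer hook:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for usage_now(); a real port reads a cycle counter. */
    static uint32_t fake_cycles;
    static uint32_t usage_now(void) { return fake_cycles; }

    struct cpu {
        uint32_t usage0;        /* cycle count when charging started */
        uint64_t thread_cycles; /* total charged to the current thread */
    };

    /* Modeled on z_sched_usage_start(): stamp the span's start. */
    static void usage_start(struct cpu *cpu)
    {
        cpu->usage0 = usage_now();
    }

    /* Modeled on z_sched_usage_stop(): charge the elapsed span.
     * Unsigned subtraction stays correct across counter wraparound.
     */
    static void usage_stop(struct cpu *cpu)
    {
        uint32_t cycles = usage_now() - cpu->usage0;

        cpu->thread_cycles += cycles;
    }

    int main(void)
    {
        struct cpu cpu0 = { 0 };

        usage_start(&cpu0);
        fake_cycles += 1000;    /* thread "ran" for 1000 cycles */
        usage_stop(&cpu0);
        printf("charged: %llu cycles\n",
               (unsigned long long)cpu0.thread_cycles);
        return 0;
    }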
|
ipi.c
     37  uint32_t id = _current_cpu->id;  in ipi_mask_create()
    102  unsigned int cpu_id = _current_cpu->id;  in first_ipi_work()
    129  unsigned int cpu_id = _current_cpu->id;  in k_ipi_work_add()
    165  unsigned int cpu_id = _current_cpu->id;  in ipi_work_process()
    204  ipi_work_process(&_kernel.cpus[_current_cpu->id].ipi_workq);  in z_sched_ipi()
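Note: ipi_mask_create() builds a bitmask of CPUs that need interrupting, and a sender never needs to IPI itself, so its own bit (from `_current_cpu->id`) is excluded; the same self-exclusion shows up in the test code further down (`_current_cpu->id == 0 ? BIT(1) : BIT(0)`). A sketch of the general shape only; the real ipi_mask_create() also weighs per-CPU scheduling state, which is omitted here:

    #include <stdio.h>

    #define BIT(n)         (1u << (n))
    #define NUM_CPUS       4u
    #define ALL_CPUS_MASK  (BIT(NUM_CPUS) - 1u)

    /* Every online CPU except the caller. A sketch of the pattern,
     * not ipi_mask_create()'s actual policy.
     */
    static unsigned int other_cpus_mask(unsigned int self_id)
    {
        return ALL_CPUS_MASK & ~BIT(self_id);
    }

    int main(void)
    {
        printf("IPI mask from cpu1: 0x%x\n", other_cpus_mask(1)); /* 0xd */
        return 0;
    }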
|
timeslicing.c
     63  if (cpu != _current_cpu->id) {  in slice_timeout()
     70  int cpu = _current_cpu->id;  in z_reset_time_slice()
    117  if (slice_expired[_current_cpu->id] && thread_is_sliceable(curr)) {  in z_time_slice()
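Note: timeslicing.c splits the work between contexts: the timer callback slice_timeout() only marks expiry for the CPU the slice was armed on (hence the `cpu != _current_cpu->id` check), and z_time_slice() later consumes `slice_expired[_current_cpu->id]` on the scheduling path. A standalone sketch of that flag handoff; thread_is_sliceable() is reduced to a preemptible flag here:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_CPUS 2

    static bool slice_expired[NUM_CPUS];

    struct thread {
        bool preemptible;
    };

    /* Modeled on slice_timeout(): the timer callback just records
     * expiry; the actual rotation happens later.
     */
    static void slice_timeout(int cpu)
    {
        slice_expired[cpu] = true;
    }

    /* Modeled on z_time_slice(): consume the flag and decide whether
     * the current thread must rotate to the back of its queue.
     */
    static bool time_slice_check(int cpu, struct thread *curr)
    {
        if (slice_expired[cpu] && curr->preemptible) {
            slice_expired[cpu] = false;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct thread t = { .preemptible = true };

        slice_timeout(0);
        printf("reschedule: %d\n", time_slice_check(0, &t));
        return 0;
    }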
|
sched.c
    120  _current_cpu->swap_ok = true;  in queue_thread()
    166  _current_cpu->metairq_preempted = _current;  in update_metairq_preempt()
    169  _current_cpu->metairq_preempted = NULL;  in update_metairq_preempt()
    193  struct k_thread *mirqp = _current_cpu->metairq_preempted;  in next_up()
    199  _current_cpu->metairq_preempted = NULL;  in next_up()
    211  return (thread != NULL) ? thread : _current_cpu->idle_thread;  in next_up()
    228  thread = _current_cpu->idle_thread;  in next_up()
    235  if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {  in next_up()
    239  if (!should_preempt(thread, _current_cpu->swap_ok)) {  in next_up()
    255  && (_current != _current_cpu->metairq_preempted)  in next_up()
    [all …]
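Note: the next_up() hits show two per-CPU fallbacks: a thread displaced by a meta-IRQ thread is stashed in metairq_preempted and resumed later, and an empty ready queue falls back to this CPU's idle_thread (line 211). A heavily stripped-down sketch of that selection order; the real next_up() also weighs priorities and the swap_ok flag, and peek_ready_queue() is a stub invented for this sketch:

    #include <stddef.h>
    #include <stdio.h>

    struct thread {
        const char *name;
        int prio; /* lower value = higher priority, as in Zephyr */
    };

    struct cpu {
        struct thread *idle_thread;
        struct thread *metairq_preempted; /* thread a meta-IRQ displaced */
    };

    /* Head of the ready queue; NULL when nothing is runnable. */
    static struct thread *peek_ready_queue(void) { return NULL; }

    /* Stripped-down next_up(): prefer the thread a meta-IRQ preempted,
     * then the ready queue, then this CPU's idle thread.
     */
    static struct thread *next_up(struct cpu *cpu)
    {
        struct thread *mirqp = cpu->metairq_preempted;

        if (mirqp != NULL) {
            cpu->metairq_preempted = NULL;
            return mirqp;
        }

        struct thread *thread = peek_ready_queue();

        return (thread != NULL) ? thread : cpu->idle_thread;
    }

    int main(void)
    {
        struct thread idle = { .name = "idle", .prio = 99 };
        struct cpu cpu0 = { .idle_thread = &idle };

        printf("next: %s\n", next_up(&cpu0)->name);
        return 0;
    }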
|
/Zephyr-4.3.0/samples/subsys/tracing/src/

tracing_user.c
     17  __ASSERT_NO_MSG(nested_interrupts[_current_cpu->id] == 0);  in sys_trace_thread_switched_in_user()
     28  __ASSERT_NO_MSG(nested_interrupts[_current_cpu->id] == 0);  in sys_trace_thread_switched_out_user()
     38  _cpu_t *curr_cpu = _current_cpu;  in sys_trace_isr_enter_user()
     49  _cpu_t *curr_cpu = _current_cpu;  in sys_trace_isr_exit_user()
|
/Zephyr-4.3.0/include/zephyr/

kernel_structs.h
    252  #define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \  macro
    259  #define _current_cpu (&_kernel.cpus[0])  macro
    263  #define CPU_ID ((CONFIG_MP_MAX_NUM_CPUS == 1) ? 0 : _current_cpu->id)
    266  #define z_current_thread_set(thread) ({ _current_cpu->current = (thread); })
    273  arch_current_thread_set(({ _current_cpu->current = (thread); }))
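Note: these are the definition sites. On SMP builds (line 252) _current_cpu is a GNU statement expression that first asserts the caller cannot migrate between CPUs (`!z_smp_cpu_mobile()`) before dereferencing per-CPU state; on uniprocessor builds (line 259) it collapses to the constant `&_kernel.cpus[0]`. A compilable sketch of that two-definition shape with simplified types; smp_cpu_mobile(), arch_curr_cpu_id() and the SMP_BUILD switch are stand-ins invented for this sketch:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_CPUS 2

    struct cpu {
        int id;
        struct thread *current;
    };

    struct kernel {
        struct cpu cpus[MAX_CPUS];
    };

    static struct kernel _kernel = { .cpus = { { .id = 0 }, { .id = 1 } } };

    /* Reading "the current CPU" in SMP is only safe while the caller
     * cannot migrate (IRQs locked, in ISR, ...). Stubbed here.
     */
    static bool smp_cpu_mobile(void)  { return false; }
    static int arch_curr_cpu_id(void) { return 0; }

    #ifdef SMP_BUILD
    /* Statement-expression form, as in the definition at line 252. */
    #define _current_cpu ({ assert(!smp_cpu_mobile());            \
                            &_kernel.cpus[arch_curr_cpu_id()]; })
    #else
    /* Uniprocessor form: only one CPU record exists (line 259). */
    #define _current_cpu (&_kernel.cpus[0])
    #endif

    int main(void)
    {
        printf("current cpu id: %d\n", _current_cpu->id);
        return 0;
    }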
|
/Zephyr-4.3.0/arch/arm64/core/

fpu.c
     36  buf[3] = '0' + _current_cpu->id;  in dbg_prefix()
    144  struct k_thread *owner = atomic_ptr_get(&_current_cpu->arch.fpu_owner);  in arch_flush_local_fpu()
    168  atomic_ptr_clear(&_current_cpu->arch.fpu_owner);  in arch_flush_local_fpu()
    193  if (i == _current_cpu->id) {  in flush_owned_fpu()
    321  struct k_thread *owner = atomic_ptr_get(&_current_cpu->arch.fpu_owner);  in z_arm64_fpu_trap()
    342  atomic_ptr_clear(&_current_cpu->arch.fpu_owner);  in z_arm64_fpu_trap()
    373  atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);  in z_arm64_fpu_trap()
    432  if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == _current) {  in fpu_access_update()
    482  if (thread == atomic_ptr_get(&_current_cpu->arch.fpu_owner)) {  in arch_float_disable()
|
/Zephyr-4.3.0/arch/x86/core/intel64/

irq_offload.c
     26  uint8_t cpu_id = _current_cpu->id;  in dispatcher()
     36  uint8_t cpu_id = _current_cpu->id;  in arch_irq_offload()
|
/Zephyr-4.3.0/arch/sparc/core/

irq_manage.c
     31  _current_cpu->nested++;  in z_sparc_enter_irq()
     48  _current_cpu->nested--;  in z_sparc_enter_irq()
|
/Zephyr-4.3.0/arch/xtensa/core/

irq_offload.c
     19  uint8_t cpu_id = _current_cpu->id;  in irq_offload_isr()
     29  uint8_t cpu_id = _current_cpu->id;  in arch_irq_offload()
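Note: both irq_offload backends in the listing (x86 intel64 above, xtensa here) follow the same scheme: arch_irq_offload() stashes the routine and its argument in a per-CPU slot indexed by `_current_cpu->id`, then raises a software interrupt whose handler reads back the slot of the CPU it runs on. A sketch of that per-CPU staging, with the software interrupt replaced by a direct call:

    #include <stdio.h>

    #define NUM_CPUS 2

    typedef void (*offload_routine_t)(const void *arg);

    /* One staging slot per CPU, indexed by the caller's CPU id. */
    static struct {
        offload_routine_t routine;
        const void *arg;
    } offload_params[NUM_CPUS];

    /* Stand-in for the software-interrupt handler (dispatcher() /
     * irq_offload_isr() in the listings): run this CPU's slot.
     */
    static void offload_dispatcher(unsigned int cpu_id)
    {
        offload_params[cpu_id].routine(offload_params[cpu_id].arg);
    }

    /* Modeled on arch_irq_offload(): stage, then trap. The trap is a
     * direct call in this sketch.
     */
    static void irq_offload(unsigned int cpu_id,
                            offload_routine_t routine, const void *arg)
    {
        offload_params[cpu_id].routine = routine;
        offload_params[cpu_id].arg = arg;
        offload_dispatcher(cpu_id); /* real code raises a SW interrupt */
    }

    static void hello(const void *arg)
    {
        printf("offloaded: %s\n", (const char *)arg);
    }

    int main(void)
    {
        irq_offload(0, hello, "hi from ISR context");
        return 0;
    }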
|
/Zephyr-4.3.0/tests/benchmarks/ipi_metric/src/

ipi_metric_primitive.c
     34  atomic_or(&ipi_cpu_bitmap, BIT(_current_cpu->id));  in z_trace_sched_ipi()
     69  desired_ipi_set = (_current_cpu->id == 0) ? BIT(1) : BIT(0);  in primitive_entry()
     74  desired_ipi_set ^= BIT(_current_cpu->id);  in primitive_entry()
|
/Zephyr-4.3.0/arch/arm/core/cortex_a_r/

fault.c
    214  if (_current_cpu->nested > 1) {  in z_arm_fault_undef_instruction_fp()
    222  (struct __fpu_sf *)_current_cpu->fp_ctx;  in z_arm_fault_undef_instruction_fp()
    228  _current_cpu->fp_ctx = NULL;  in z_arm_fault_undef_instruction_fp()
    238  if (((_current_cpu->nested == 2)  in z_arm_fault_undef_instruction_fp()
    240  || ((_current_cpu->nested > 2)  in z_arm_fault_undef_instruction_fp()
|
/Zephyr-4.3.0/arch/mips/core/

irq_manage.c
     65  _current_cpu->nested++;  in z_mips_enter_irq()
     91  _current_cpu->nested--;  in z_mips_enter_irq()
|
/Zephyr-4.3.0/tests/subsys/pm/power_mgmt_multicore/src/

main.c
     36  switch (state_testing[_current_cpu->id]) {  in pm_state_set()
     44  zassert_equal(_current_cpu->id, 1U);  in pm_state_set()
|
/Zephyr-4.3.0/tests/kernel/ipi_work/src/

main.c
     60  timer_target_cpu = _current_cpu->id == 0 ? BIT(1) : BIT(0);  in timer_func()
    102  target_cpu_mask = _current_cpu->id == 0 ? BIT(1) : BIT(0);  in ZTEST()
|
/Zephyr-4.3.0/arch/mips/include/

kernel_arch_func.h
     47  return _current_cpu->nested != 0U;  in arch_is_in_isr()
|
/Zephyr-4.3.0/arch/sparc/include/

kernel_arch_func.h
     55  return _current_cpu->nested != 0U;  in arch_is_in_isr()
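Note: the MIPS and SPARC ports above keep interrupt depth in the per-CPU nested counter: the IRQ entry path increments it, the exit path decrements it, and arch_is_in_isr() is just `nested != 0U`. A standalone sketch of that counter, modeling a single CPU:

    #include <stdbool.h>
    #include <stdio.h>

    /* Per-CPU interrupt nesting depth (one CPU modeled here). */
    static unsigned int nested;

    /* Modeled on z_mips_enter_irq() / z_sparc_enter_irq(): bump the
     * depth on entry, drop it on exit; nesting pushes it higher.
     */
    static void enter_irq(void) { nested++; }
    static void exit_irq(void)  { nested--; }

    /* Modeled on arch_is_in_isr(): nonzero depth means ISR context. */
    static bool is_in_isr(void)
    {
        return nested != 0U;
    }

    int main(void)
    {
        printf("in ISR: %d\n", is_in_isr());  /* 0 */
        enter_irq();
        enter_irq();                          /* a nested interrupt */
        printf("in ISR: %d, depth %u\n", is_in_isr(), nested);
        exit_irq();
        exit_irq();
        printf("in ISR: %d\n", is_in_isr());  /* 0 */
        return 0;
    }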
|
/Zephyr-4.3.0/arch/arm/include/cortex_a_r/

exception.h
     52  return (_current_cpu->arch.exc_depth > 1U) ? (true) : (false);  in arch_is_in_nested_exception()
|
/Zephyr-4.3.0/subsys/profiling/perf/backends/

perf_x86.c
     45  *((struct isf **)(((void **)_current_cpu->irq_stack)-1));  in arch_perf_current_stack_trace()
|
perf_riscv.c
     44  *((struct arch_esf **)(((uintptr_t)_current_cpu->irq_stack) - 16));  in arch_perf_current_stack_trace()
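Note: both perf backends recover the interrupted context from a pointer stored at a fixed offset below the top of the per-CPU IRQ stack: one `void *` slot on x86, 16 bytes on RISC-V. A sketch of stashing and recovering such a pointer under an assumed layout; the esf struct, stash_esf() and the array-backed stack are inventions of this sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* A stand-in for the saved exception stack frame. */
    struct esf {
        uintptr_t pc;
    };

    /* IRQ "stack" modeled as an array; irq_stack points at its top. */
    static void *irq_stack_mem[64];
    static void **irq_stack = &irq_stack_mem[64];

    /* Models the entry-code side: park the ESF pointer in the slot
     * just below the top of the IRQ stack.
     */
    static void stash_esf(struct esf *esf)
    {
        *(irq_stack - 1) = esf;
    }

    /* Models arch_perf_current_stack_trace() on x86: read it back
     * with the same one-slot offset. The RISC-V backend does the
     * equivalent arithmetic in bytes (irq_stack - 16).
     */
    static struct esf *recover_esf(void)
    {
        return *((struct esf **)(irq_stack - 1));
    }

    int main(void)
    {
        struct esf frame = { .pc = 0x1234 };

        stash_esf(&frame);
        printf("interrupted pc: 0x%lx\n",
               (unsigned long)recover_esf()->pc);
        return 0;
    }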
|
/Zephyr-4.3.0/tests/arch/riscv/pmp/isr-stack-guard/src/

main.c
     32  char *isr_stack = (char *)z_interrupt_stacks[_current_cpu->id];  in check_isr_stack_guard()
|
/Zephyr-4.3.0/subsys/cpu_freq/

cpu_freq.c
     82  target_cpus ^= (1U << _current_cpu->id);  in cpu_freq_timer_handler()
|