/Linux-v4.19/include/linux/
D | rcu_node_tree.h |
    65  #if NR_CPUS <= RCU_FANOUT_1
    72  #elif NR_CPUS <= RCU_FANOUT_2
    75  # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
    80  #elif NR_CPUS <= RCU_FANOUT_3
    83  # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
    84  # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
    89  #elif NR_CPUS <= RCU_FANOUT_4
    92  # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
    93  # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
    94  # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)

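The matches above are the macros that size RCU's rcu_node combining tree: an extra level is added only once NR_CPUS no longer fits under the previous fanout power. A minimal userspace sketch of that arithmetic, assuming a uniform fanout of 64 and an illustrative NR_CPUS of 4096 (the kernel takes both from Kconfig and lets the leaf fanout differ):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* Illustrative values; the kernel takes these from Kconfig. */
    #define NR_CPUS       4096
    #define RCU_FANOUT    64
    #define RCU_FANOUT_1  (RCU_FANOUT)
    #define RCU_FANOUT_2  (RCU_FANOUT_1 * RCU_FANOUT)

    int main(void)
    {
            /* 4096 CPUs with a uniform fanout of 64 fit in a two-level tree
             * (root plus one leaf level): the leaf level needs
             * DIV_ROUND_UP(4096, 64) = 64 rcu_node structures, and a deeper
             * tree is only needed once NR_CPUS > RCU_FANOUT_2. */
            printf("single node is enough: %s\n",
                   NR_CPUS <= RCU_FANOUT_1 ? "yes" : "no");
            printf("two levels are enough: %s\n",
                   NR_CPUS <= RCU_FANOUT_2 ? "yes" : "no");
            printf("leaf-level nodes:      %d\n",
                   DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1));
            return 0;
    }
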
D | cpumask.h |
    16   typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
    35   #if NR_CPUS == 1
    46   #define nr_cpumask_bits ((unsigned int)NR_CPUS)
    98   #if NR_CPUS > 1
    132  #if NR_CPUS == 1
    296  [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
    771  extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
    788  bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);  in reset_cpu_possible_mask()
    855  cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
    866  #if NR_CPUS <= BITS_PER_LONG
    [all …]

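As line 16 shows, cpumask_t is just a fixed-size bitmap with one bit per possible CPU, so its storage cost is governed entirely by NR_CPUS. A standalone sketch of that representation, with simplified stand-ins for DECLARE_BITMAP and the set/test helpers (NR_CPUS chosen only for illustration):

    #include <stdio.h>
    #include <limits.h>

    #define NR_CPUS          64        /* illustrative */
    #define BITS_PER_LONG    (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Simplified stand-in for DECLARE_BITMAP(bits, NR_CPUS). */
    struct cpumask {
            unsigned long bits[BITS_TO_LONGS(NR_CPUS)];
    };

    static void cpumask_set_cpu(int cpu, struct cpumask *m)
    {
            m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
    }

    static int cpumask_test_cpu(int cpu, const struct cpumask *m)
    {
            return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
            struct cpumask online = { { 0 } };

            cpumask_set_cpu(0, &online);
            cpumask_set_cpu(3, &online);
            printf("cpu3 set: %d, cpu5 set: %d\n",
                   cpumask_test_cpu(3, &online),
                   cpumask_test_cpu(5, &online));
            return 0;
    }
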
D | blockgroup_lock.h |
    14  #define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32))

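NR_BG_LOCKS scales the number of block-group locks with the CPU count, doubling per power of two of CPUs but clamping the input at 32, so the result never exceeds 4 << 5 = 128. A small sketch that tabulates the formula for a few illustrative CPU counts:

    #include <stdio.h>

    /* Integer log2, playing the role ilog2() plays in the kernel macro. */
    static int ilog2(unsigned int v)
    {
            int l = -1;

            while (v) {
                    v >>= 1;
                    l++;
            }
            return l;
    }

    static int nr_bg_locks(int nr_cpus)
    {
            /* 4 << ilog2(min(nr_cpus, 32)): doubles with each doubling of the
             * CPU count and tops out at 4 << 5 = 128 for 32 or more CPUs. */
            return 4 << ilog2(nr_cpus < 32 ? nr_cpus : 32);
    }

    int main(void)
    {
            int cpus[] = { 1, 2, 8, 16, 32, 256 };

            for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
                    printf("NR_CPUS=%-3d -> NR_BG_LOCKS=%d\n",
                           cpus[i], nr_bg_locks(cpus[i]));
            return 0;
    }
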
/Linux-v4.19/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/ |
D | preempt.c |
    26  struct lock_impl cpu_preemption_locks[NR_CPUS] = {
    28  #if (NR_CPUS - 1) & 1
    31  #if (NR_CPUS - 1) & 2
    34  #if (NR_CPUS - 1) & 4
    37  #if (NR_CPUS - 1) & 8
    40  #if (NR_CPUS - 1) & 16
    43  #if (NR_CPUS - 1) & 32
    67  assume(thread_cpu_id < NR_CPUS);  in preempt_disable()

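This CBMC stub needs a braced initializer with exactly NR_CPUS entries, but the preprocessor cannot loop, so it emits one leading initializer and then, for each set bit of (NR_CPUS - 1), a run of 2^k more; the runs sum to NR_CPUS. A cut-down sketch of the same trick, using a hypothetical INIT placeholder instead of the stub's LOCK_IMPL_INITIALIZER:

    #include <stdio.h>

    #define NR_CPUS 5        /* illustrative */
    #define INIT    0        /* stands in for LOCK_IMPL_INITIALIZER */

    /* Declared without a size here so sizeof() reveals how many initializers
     * were emitted; the real stub declares cpu_preemption_locks[NR_CPUS], and
     * the bit trick guarantees the list has exactly NR_CPUS entries. */
    int locks[] = {
            INIT,                       /* always one entry */
    #if (NR_CPUS - 1) & 1
            INIT,                       /* +1 when bit 0 of NR_CPUS-1 is set */
    #endif
    #if (NR_CPUS - 1) & 2
            INIT, INIT,                 /* +2 when bit 1 is set */
    #endif
    #if (NR_CPUS - 1) & 4
            INIT, INIT, INIT, INIT,     /* +4 when bit 2 is set */
    #endif
    };

    int main(void)
    {
            /* NR_CPUS - 1 = 4 = 0b100, so only the +4 run is emitted:
             * 1 + 4 = 5 entries, matching NR_CPUS. */
            printf("initializers emitted: %zu\n",
                   sizeof(locks) / sizeof(locks[0]));
            return 0;
    }
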
/Linux-v4.19/arch/powerpc/include/asm/ |
D | irq.h |
    51  extern struct thread_info *critirq_ctx[NR_CPUS];
    52  extern struct thread_info *dbgirq_ctx[NR_CPUS];
    53  extern struct thread_info *mcheckirq_ctx[NR_CPUS];
    62  extern struct thread_info *hardirq_ctx[NR_CPUS];
    63  extern struct thread_info *softirq_ctx[NR_CPUS];

/Linux-v4.19/arch/mips/paravirt/ |
D | paravirt-smp.c |
    24  unsigned long paravirt_smp_sp[NR_CPUS];
    25  unsigned long paravirt_smp_gp[NR_CPUS];
    34  if (newval < 1 || newval >= NR_CPUS)  in set_numcpus()
    50  if (WARN_ON(cpunum >= NR_CPUS))  in paravirt_smp_setup()
    54  for (id = 0; id < NR_CPUS; id++) {  in paravirt_smp_setup()

/Linux-v4.19/arch/ia64/kernel/ |
D | err_inject.c |
    43  static u64 call_start[NR_CPUS];
    44  static u64 phys_addr[NR_CPUS];
    45  static u64 err_type_info[NR_CPUS];
    46  static u64 err_struct_info[NR_CPUS];
    51  } __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS];
    52  static s64 status[NR_CPUS];
    53  static u64 capabilities[NR_CPUS];
    54  static u64 resources[NR_CPUS];

D | numa.c |
    27  u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
    78  for (i = 0; i < NR_CPUS; ++i)  in build_cpu_to_node_map()

/Linux-v4.19/arch/sparc/include/asm/ |
D | topology_64.h |
    53  extern cpumask_t cpu_core_map[NR_CPUS];
    54  extern cpumask_t cpu_core_sib_map[NR_CPUS];
    55  extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];

D | irq_64.h |
    94  extern void *hardirq_stack[NR_CPUS];
    95  extern void *softirq_stack[NR_CPUS];

/Linux-v4.19/arch/ia64/include/asm/ |
D | smp.h |
    56  int cpu_phys_id[NR_CPUS];
    61  extern cpumask_t cpu_core_map[NR_CPUS];
    81  for (i = 0; i < NR_CPUS; ++i)  in cpu_logical_id()

D | numa.h |
    25  extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
    51  extern struct node_cpuid_s node_cpuid[NR_CPUS];

/Linux-v4.19/arch/ia64/include/asm/native/ |
D | irq.h |
    27  #if (NR_VECTORS + 32 * NR_CPUS) < 1024
    28  #define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS)

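The IA64 header budgets 32 device interrupts per CPU on top of the base vector space, but only keeps the per-CPU formula while the total stays under 1024; the else branch is not shown in this match, so the fixed fallback below is an assumption of the sketch. NR_VECTORS = 256 is likewise only an illustrative value:

    #include <stdio.h>

    #define NR_VECTORS 256        /* illustrative base vector count */

    static int nr_irqs_for(int nr_cpus)
    {
            int wanted = NR_VECTORS + 32 * nr_cpus;

            /* The header only uses the per-CPU formula while it stays below
             * 1024; this sketch assumes a flat 1024 cap otherwise. */
            return wanted < 1024 ? wanted : 1024;
    }

    int main(void)
    {
            int cpus[] = { 1, 8, 16, 24, 64 };

            for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
                    printf("NR_CPUS=%-2d -> NR_IRQS=%d\n",
                           cpus[i], nr_irqs_for(cpus[i]));
            return 0;
    }
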
/Linux-v4.19/arch/arm/mach-shmobile/ |
D | headsmp.S |
    121  cmp r1, #NR_CPUS
    144  .space NR_CPUS * 4
    147  .space NR_CPUS * 4
    150  .space NR_CPUS * 4

/Linux-v4.19/arch/sparc/kernel/ |
D | leon_smp.c |
    56   extern volatile unsigned long cpu_callin_map[NR_CPUS];
    169  (unsigned int)nrcpu, (unsigned int)NR_CPUS,  in leon_boot_cpus()
    239  for (i = 0; i < NR_CPUS; i++) {  in leon_smp_done()
    370  unsigned long processors_in[NR_CPUS];  /* Set when ipi entered. */
    371  unsigned long processors_out[NR_CPUS];  /* Set when ipi exited. */
    382  register int high = NR_CPUS - 1;  in leon_cross_call()

D | smp_32.c |
    44   volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
    192  if (cpuid >= NR_CPUS)  in smp_prepare_cpus()
    236  if (mid < NR_CPUS) {  in smp_setup_cpu_possible_map()
    248  if (cpuid >= NR_CPUS) {  in smp_prepare_boot_cpu()

/Linux-v4.19/arch/sh/kernel/ |
D | irq.c |
    65  static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
    66  static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
    68  static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
    69  static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

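Lines 68-69 carve per-CPU hard-IRQ and soft-IRQ stacks out of one flat NR_CPUS * THREAD_SIZE allocation, one THREAD_SIZE slice per CPU. A userspace sketch of the indexing such a layout implies (both constants are illustrative here):

    #include <stdio.h>

    #define NR_CPUS      4           /* illustrative */
    #define THREAD_SIZE  8192        /* illustrative */

    /* One flat allocation, as in the kernel's __page_aligned_bss arrays. */
    static char softirq_stack[NR_CPUS * THREAD_SIZE];
    static char hardirq_stack[NR_CPUS * THREAD_SIZE];

    /* Base of the THREAD_SIZE slice belonging to a given CPU. */
    static void *irq_stack_base(char *pool, int cpu)
    {
            return pool + cpu * THREAD_SIZE;
    }

    int main(void)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    printf("cpu%d: softirq=%p hardirq=%p\n", cpu,
                           irq_stack_base(softirq_stack, cpu),
                           irq_stack_base(hardirq_stack, cpu));
            return 0;
    }
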
/Linux-v4.19/arch/sh/include/asm/ |
D | fixmap.h |
    57  FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
    61  FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,

D | smp.h |
    19  extern int __cpu_number_map[NR_CPUS];
    23  extern int __cpu_logical_map[NR_CPUS];

/Linux-v4.19/arch/powerpc/lib/ |
D | locks.c |
    33  BUG_ON(holder_cpu >= NR_CPUS);  in __spin_yield()
    59  BUG_ON(holder_cpu >= NR_CPUS);  in __rw_yield()

/Linux-v4.19/arch/x86/kernel/ |
D | cpuid.c |
    157  if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,  in cpuid_init()
    181  __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");  in cpuid_init()
    190  __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");  in cpuid_exit()

/Linux-v4.19/arch/riscv/kernel/ |
D | smpboot.c |
    39  void *__cpu_up_stack_pointer[NR_CPUS];
    40  void *__cpu_up_task_pointer[NR_CPUS];

/Linux-v4.19/arch/alpha/kernel/ |
D | smp.c |
    56   struct cpuinfo_alpha cpu_data[NR_CPUS];
    62   } ipi_data[NR_CPUS] __cacheline_aligned;
    249  for (i = 0; i < NR_CPUS; i++) {  in recv_secondary_console_msg()
    492  for(cpu = 0; cpu < NR_CPUS; cpu++)  in smp_cpus_done()
    659  for (cpu = 0; cpu < NR_CPUS; cpu++) {  in flush_tlb_mm()
    708  for (cpu = 0; cpu < NR_CPUS; cpu++) {  in flush_tlb_page()
    764  for (cpu = 0; cpu < NR_CPUS; cpu++) {  in flush_icache_user_range()

/Linux-v4.19/arch/powerpc/platforms/86xx/ |
D | mpc86xx_smp.c |
    43  if (nr < 0 || nr >= NR_CPUS)  in smp_86xx_release_core()
    67  if (nr < 0 || nr >= NR_CPUS)  in smp_86xx_kick_cpu()

/Linux-v4.19/arch/ia64/mm/ |
D | numa.c |
    30  struct node_cpuid_s node_cpuid[NR_CPUS] =
    31  { [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } };

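The node_cpuid table is populated with a GCC range designated initializer, [0 ... NR_CPUS-1], so every possible CPU slot starts out pointing at NUMA_NO_NODE before the real topology is discovered. A standalone sketch of that initializer form (the NR_CPUS and NUMA_NO_NODE values are illustrative):

    #include <stdio.h>

    #define NR_CPUS      8         /* illustrative */
    #define NUMA_NO_NODE (-1)      /* illustrative */

    struct node_cpuid_s {
            int phys_id;
            int nid;
    };

    /* GCC/Clang extension: one range designator initializes every element. */
    static struct node_cpuid_s node_cpuid[NR_CPUS] = {
            [0 ... NR_CPUS - 1] = { .phys_id = 0, .nid = NUMA_NO_NODE }
    };

    int main(void)
    {
            printf("cpu 5 starts on node %d\n", node_cpuid[5].nid);
            return 0;
    }
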