| /Linux-v5.4/include/linux/ |
| D | rcu_node_tree.h |
|     52  #if NR_CPUS <= RCU_FANOUT_1
|     59  #elif NR_CPUS <= RCU_FANOUT_2
|     62  # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
|     67  #elif NR_CPUS <= RCU_FANOUT_3
|     70  # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
|     71  # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
|     76  #elif NR_CPUS <= RCU_FANOUT_4
|     79  # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
|     80  # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
|     81  # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
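Note: the rcu_node_tree.h hits above size each level of the RCU combining tree with nothing but preprocessor arithmetic: NUM_RCU_LVL_n is the number of rcu_node structures needed at level n, i.e. ceil(NR_CPUS / fanout^k). A minimal user-space sketch of that arithmetic, assuming RCU_FANOUT_1/RCU_FANOUT_2 are successive powers of a single fanout value and using made-up numbers rather than a real kernel config:

    #include <stdio.h>

    /* Hypothetical stand-ins for the Kconfig-derived constants. */
    #define NR_CPUS        64
    #define RCU_FANOUT     16
    #define RCU_FANOUT_1   (RCU_FANOUT)                  /* CPUs one leaf node can cover   */
    #define RCU_FANOUT_2   (RCU_FANOUT_1 * RCU_FANOUT)   /* CPUs a two-level tree can cover */

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Mirrors the NR_CPUS <= RCU_FANOUT_2 branch above:
         * one root node plus ceil(NR_CPUS / RCU_FANOUT_1) leaf nodes. */
        printf("root nodes: 1\n");
        printf("leaf nodes: %d\n", DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1));
        return 0;
    }

With 64 CPUs and a fanout of 16 this gives four leaf rcu_node structures under a single root, which is exactly what the NR_CPUS <= RCU_FANOUT_2 branch computes at build time.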
|
| D | cpumask.h |
|     17  typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
|     36  #if NR_CPUS == 1
|     47  #define nr_cpumask_bits ((unsigned int)NR_CPUS)
|    101  #if NR_CPUS > 1
|    148  #if NR_CPUS == 1
|    312  [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
|    800  extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
|    817  bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);  in reset_cpu_possible_mask()
|    877  cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
|    888  #if NR_CPUS <= BITS_PER_LONG
|    [all …]
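Note: struct cpumask is simply a fixed-size bitmap with one bit per possible CPU; DECLARE_BITMAP(bits, NR_CPUS) expands to an array of unsigned long large enough to hold NR_CPUS bits. A small user-space re-creation of the idea, a sketch only (made-up NR_CPUS, not the kernel's cpumask API):

    #include <limits.h>   /* CHAR_BIT */
    #include <stdio.h>

    #define NR_CPUS            8    /* hypothetical build-time limit */
    #define BITS_PER_LONG      (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(nr)  (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Rough stand-in for struct cpumask: one bit per possible CPU. */
    typedef struct { unsigned long bits[BITS_TO_LONGS(NR_CPUS)]; } cpumask_t;

    static void cpumask_set_cpu(int cpu, cpumask_t *mask)
    {
        mask->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
    }

    static int cpumask_test_cpu(int cpu, const cpumask_t *mask)
    {
        return (mask->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        cpumask_t online = { { 0 } };

        cpumask_set_cpu(3, &online);
        printf("cpu3 set: %d, cpu5 set: %d\n",
               cpumask_test_cpu(3, &online), cpumask_test_cpu(5, &online));
        return 0;
    }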
|
| D | blockgroup_lock.h |
|     14  #define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32))
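Note: NR_BG_LOCKS grows with the CPU count but clamps the input at 32, so the result is always a power of two between 4 and 128 (ilog2() is floor(log2)). A quick sketch that evaluates the same formula for a few hypothetical CPU counts:

    #include <stdio.h>

    /* Runtime floor(log2(n)) for n >= 1; the kernel's ilog2() yields the
     * same value but is folded to a constant at compile time. */
    static unsigned int ilog2_approx(unsigned int n)
    {
        unsigned int log = 0;

        while (n >>= 1)
            log++;
        return log;
    }

    int main(void)
    {
        unsigned int counts[] = { 1, 2, 4, 16, 32, 256 };

        /* Same formula as NR_BG_LOCKS: 4 << ilog2(min(NR_CPUS, 32)). */
        for (int i = 0; i < 6; i++) {
            unsigned int n = counts[i];
            printf("NR_CPUS=%3u -> NR_BG_LOCKS=%u\n",
                   n, 4u << ilog2_approx(n < 32 ? n : 32));
        }
        return 0;
    }

So a single-CPU build gets 4 locks, 4 CPUs get 16, and anything from 32 CPUs upward tops out at 128.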
|
| /Linux-v5.4/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/ |
| D | preempt.c |
|     26  struct lock_impl cpu_preemption_locks[NR_CPUS] = {
|     28  #if (NR_CPUS - 1) & 1
|     31  #if (NR_CPUS - 1) & 2
|     34  #if (NR_CPUS - 1) & 4
|     37  #if (NR_CPUS - 1) & 8
|     40  #if (NR_CPUS - 1) & 16
|     43  #if (NR_CPUS - 1) & 32
|     67  assume(thread_cpu_id < NR_CPUS);  in preempt_disable()
|
| /Linux-v5.4/arch/powerpc/include/asm/ |
| D | irq.h |
|     48  extern void *critirq_ctx[NR_CPUS];
|     49  extern void *dbgirq_ctx[NR_CPUS];
|     50  extern void *mcheckirq_ctx[NR_CPUS];
|     56  extern void *hardirq_ctx[NR_CPUS];
|     57  extern void *softirq_ctx[NR_CPUS];
|
| /Linux-v5.4/arch/mips/paravirt/ |
| D | paravirt-smp.c |
|     24  unsigned long paravirt_smp_sp[NR_CPUS];
|     25  unsigned long paravirt_smp_gp[NR_CPUS];
|     34  if (newval < 1 || newval >= NR_CPUS)  in set_numcpus()
|     50  if (WARN_ON(cpunum >= NR_CPUS))  in paravirt_smp_setup()
|     54  for (id = 0; id < NR_CPUS; id++) {  in paravirt_smp_setup()
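Note: many of these hits show the same defensive pattern: a CPU number is checked against NR_CPUS before it is used to index a statically sized per-CPU array. A minimal sketch of that guard with hypothetical names (not the MIPS paravirt code itself):

    #include <errno.h>
    #include <stdio.h>

    #define NR_CPUS 4                            /* hypothetical build-time limit */

    static unsigned long cpu_boot_sp[NR_CPUS];   /* stands in for paravirt_smp_sp[] */

    /* Refuse CPU numbers that would index past the statically sized array. */
    static int set_cpu_boot_sp(unsigned int cpu, unsigned long sp)
    {
        if (cpu >= NR_CPUS)
            return -EINVAL;
        cpu_boot_sp[cpu] = sp;
        return 0;
    }

    int main(void)
    {
        printf("cpu 2 -> %d\n", set_cpu_boot_sp(2, 0x1000));  /* 0: accepted */
        printf("cpu 9 -> %d\n", set_cpu_boot_sp(9, 0x1000));  /* -EINVAL: rejected */
        return 0;
    }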
|
| /Linux-v5.4/arch/ia64/kernel/ |
| D | err_inject.c |
|     43  static u64 call_start[NR_CPUS];
|     44  static u64 phys_addr[NR_CPUS];
|     45  static u64 err_type_info[NR_CPUS];
|     46  static u64 err_struct_info[NR_CPUS];
|     51  } __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS];
|     52  static s64 status[NR_CPUS];
|     53  static u64 capabilities[NR_CPUS];
|     54  static u64 resources[NR_CPUS];
|
| D | numa.c |
|     15  u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
|     66  for (i = 0; i < NR_CPUS; ++i)  in build_cpu_to_node_map()
|
| /Linux-v5.4/arch/sparc/include/asm/ |
| D | topology_64.h |
|     53  extern cpumask_t cpu_core_map[NR_CPUS];
|     54  extern cpumask_t cpu_core_sib_map[NR_CPUS];
|     55  extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];
|
| /Linux-v5.4/arch/riscv/kernel/ |
| D | smp.c |
|     30  unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
|     31  [0 ... NR_CPUS-1] = INVALID_HARTID
|     43  } ipi_data[NR_CPUS] __cacheline_aligned;
|     49  for (i = 0; i < NR_CPUS; i++)  in riscv_hartid_to_cpuid()
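Note: the __cpuid_to_hartid_map initializer uses the GCC/Clang range designated initializer ([first ... last] = value) to mark every slot invalid until a hart is actually discovered. A user-space sketch of the same construct, with made-up constants (the real INVALID_HARTID sentinel comes from the RISC-V headers):

    #include <stdio.h>

    #define NR_CPUS         4
    #define INVALID_HARTID  (-1UL)   /* hypothetical sentinel value */

    /* GCC/Clang extension: initialize a whole index range to one value. */
    static unsigned long cpuid_to_hartid_map[NR_CPUS] = {
        [0 ... NR_CPUS - 1] = INVALID_HARTID
    };

    int main(void)
    {
        cpuid_to_hartid_map[0] = 0;   /* pretend the boot hart was discovered */

        for (int i = 0; i < NR_CPUS; i++)
            printf("cpu%d -> hart %ld\n", i, (long)cpuid_to_hartid_map[i]);
        return 0;
    }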
|
| D | smpboot.c |
|     36  void *__cpu_up_stack_pointer[NR_CPUS];
|     37  void *__cpu_up_task_pointer[NR_CPUS];
|     77  if (cpuid >= NR_CPUS) {  in setup_smp()
|
| /Linux-v5.4/arch/ia64/include/asm/ |
| D | smp.h |
|     56  int cpu_phys_id[NR_CPUS];
|     61  extern cpumask_t cpu_core_map[NR_CPUS];
|     81  for (i = 0; i < NR_CPUS; ++i)  in cpu_logical_id()
|
| D | numa.h |
|     25  extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
|     51  extern struct node_cpuid_s node_cpuid[NR_CPUS];
|
| /Linux-v5.4/arch/ia64/include/asm/native/ |
| D | irq.h |
|     14  #if (NR_VECTORS + 32 * NR_CPUS) < 1024
|     15  #define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
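Note: this header grows the IRQ space with the CPU count while keeping it below a fixed ceiling; only the first branch is visible in the match, so the fallback in the sketch below is an assumption. A preprocessor sketch with hypothetical values:

    #include <stdio.h>

    /* Hypothetical stand-ins; the real values come from the ia64 headers. */
    #define NR_VECTORS  256
    #define NR_CPUS     8

    /* Grow the IRQ count with the CPU count, but never past 1024
     * (the else branch here is assumed, not quoted from the kernel). */
    #if (NR_VECTORS + 32 * NR_CPUS) < 1024
    #define NR_IRQS_SKETCH  (NR_VECTORS + 32 * NR_CPUS)
    #else
    #define NR_IRQS_SKETCH  1024
    #endif

    int main(void)
    {
        printf("NR_IRQS_SKETCH = %d\n", NR_IRQS_SKETCH);   /* 256 + 32*8 = 512 */
        return 0;
    }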
|
| /Linux-v5.4/arch/arm/mach-shmobile/ |
| D | headsmp.S |
|    118  cmp r1, #NR_CPUS
|    141  .space NR_CPUS * 4
|    144  .space NR_CPUS * 4
|    147  .space NR_CPUS * 4
|
| /Linux-v5.4/arch/sparc/kernel/ |
| D | leon_smp.c |
|     56  extern volatile unsigned long cpu_callin_map[NR_CPUS];
|    169  (unsigned int)nrcpu, (unsigned int)NR_CPUS,  in leon_boot_cpus()
|    239  for (i = 0; i < NR_CPUS; i++) {  in leon_smp_done()
|    370  unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
|    371  unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
|    382  register int high = NR_CPUS - 1;  in leon_cross_call()
|
| D | smp_32.c |
|     44  volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
|    192  if (cpuid >= NR_CPUS)  in smp_prepare_cpus()
|    236  if (mid < NR_CPUS) {  in smp_setup_cpu_possible_map()
|    248  if (cpuid >= NR_CPUS) {  in smp_prepare_boot_cpu()
|
| /Linux-v5.4/arch/sh/kernel/ |
| D | irq.c |
|     65  static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
|     66  static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
|     68  static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
|     69  static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
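Note: the SH irq code reserves per-CPU softirq/hardirq stacks as one flat, page-aligned array of NR_CPUS * THREAD_SIZE bytes, one THREAD_SIZE slice per CPU. A rough user-space sketch of that slicing (hypothetical sizes, not the kernel's stack-switching code):

    #include <stdio.h>

    #define NR_CPUS      2
    #define THREAD_SIZE  8192          /* hypothetical; really set by the arch */

    /* One contiguous block sliced per CPU, as in softirq_stack[] above. */
    static char softirq_stack[NR_CPUS * THREAD_SIZE];

    /* Top of a given CPU's slice; a real stack would grow down from here. */
    static char *softirq_stack_top(int cpu)
    {
        return softirq_stack + (cpu + 1) * THREAD_SIZE;
    }

    int main(void)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            printf("cpu%d stack top at offset %ld\n",
                   cpu, (long)(softirq_stack_top(cpu) - softirq_stack));
        return 0;
    }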
|
| /Linux-v5.4/arch/sh/include/asm/ |
| D | fixmap.h |
|     54  FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
|     58  FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
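Note: these enum entries reserve a contiguous block of fixed-mapping slots per CPU by defining END = BEGIN + (entries per CPU * NR_CPUS) - 1. A compilable sketch of that reservation pattern with made-up counts (not the real sh enum):

    #include <stdio.h>

    /* Hypothetical counts; the real ones come from the sh headers. */
    #define NR_CPUS        2
    #define FIX_N_COLOURS  4

    /* One fixmap slot per (colour, cpu) pair, reserved as a single block:
     * END = BEGIN + FIX_N_COLOURS * NR_CPUS - 1. */
    enum fixed_addresses_sketch {
        FIX_HOLE,
        FIX_CMAP_BEGIN,
        FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
        END_OF_FIXED_ADDRESSES
    };

    int main(void)
    {
        printf("CMAP slots %d..%d, %d fixmap entries total\n",
               FIX_CMAP_BEGIN, FIX_CMAP_END, END_OF_FIXED_ADDRESSES);
        return 0;
    }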
|
| D | smp.h |
|     19  extern int __cpu_number_map[NR_CPUS];
|     23  extern int __cpu_logical_map[NR_CPUS];
|
| /Linux-v5.4/arch/powerpc/lib/ |
| D | locks.c |
|     29  BUG_ON(holder_cpu >= NR_CPUS);  in splpar_spin_yield()
|     55  BUG_ON(holder_cpu >= NR_CPUS);  in splpar_rw_yield()
|
| /Linux-v5.4/tools/testing/selftests/rseq/ |
| D | run_param_test.sh |
|      4  NR_CPUS=`grep '^processor' /proc/cpuinfo | wc -l`
|     33  NR_THREADS=$((6*${NR_CPUS}))
|
| /Linux-v5.4/arch/x86/kernel/ |
| D | cpuid.c |
|    152  if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,  in cpuid_init()
|    176  __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");  in cpuid_init()
|    185  __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");  in cpuid_exit()
|
| /Linux-v5.4/arch/alpha/kernel/ |
| D | smp.c |
|     56  struct cpuinfo_alpha cpu_data[NR_CPUS];
|     62  } ipi_data[NR_CPUS] __cacheline_aligned;
|    249  for (i = 0; i < NR_CPUS; i++) {  in recv_secondary_console_msg()
|    492  for(cpu = 0; cpu < NR_CPUS; cpu++)  in smp_cpus_done()
|    656  for (cpu = 0; cpu < NR_CPUS; cpu++) {  in flush_tlb_mm()
|    703  for (cpu = 0; cpu < NR_CPUS; cpu++) {  in flush_tlb_page()
|    757  for (cpu = 0; cpu < NR_CPUS; cpu++) {  in flush_icache_user_range()
|
| /Linux-v5.4/arch/powerpc/kernel/ |
| D | irq.c |
|    674  void *critirq_ctx[NR_CPUS] __read_mostly;
|    675  void *dbgirq_ctx[NR_CPUS] __read_mostly;
|    676  void *mcheckirq_ctx[NR_CPUS] __read_mostly;
|    679  void *softirq_ctx[NR_CPUS] __read_mostly;
|    680  void *hardirq_ctx[NR_CPUS] __read_mostly;
|