/Linux-v4.19/fs/squashfs/
  decompressor_multi_percpu.c
      32  struct squashfs_stream __percpu *percpu;          in squashfs_decompressor_create() local
      35  percpu = alloc_percpu(struct squashfs_stream);    in squashfs_decompressor_create()
      36  if (percpu == NULL)                                in squashfs_decompressor_create()
      40  stream = per_cpu_ptr(percpu, cpu);                 in squashfs_decompressor_create()
      49  return (__force void *) percpu;                    in squashfs_decompressor_create()
      53  stream = per_cpu_ptr(percpu, cpu);                 in squashfs_decompressor_create()
      57  free_percpu(percpu);                               in squashfs_decompressor_create()
      63  struct squashfs_stream __percpu *percpu =          in squashfs_decompressor_destroy() local
      70  stream = per_cpu_ptr(percpu, cpu);                 in squashfs_decompressor_destroy()
      73  free_percpu(percpu);                               in squashfs_decompressor_destroy()
      [all …]
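The squashfs hits above are a textbook use of the dynamic per-CPU allocator: alloc_percpu() hands back one zeroed instance per possible CPU, per_cpu_ptr() reaches a given CPU's instance, and free_percpu() releases them all. A minimal sketch of that pattern follows; the names (stream_state, streams_create) are invented for illustration and are not the squashfs structures listed above.

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/slab.h>

    /* Hypothetical per-CPU state, one copy per possible CPU. */
    struct stream_state {
            void *workspace;
    };

    static struct stream_state __percpu *streams;

    static int streams_create(size_t workspace_size)
    {
            struct stream_state *s;
            int cpu;

            streams = alloc_percpu(struct stream_state);   /* zeroed on allocation */
            if (!streams)
                    return -ENOMEM;

            for_each_possible_cpu(cpu) {
                    s = per_cpu_ptr(streams, cpu);
                    s->workspace = kmalloc(workspace_size, GFP_KERNEL);
                    if (!s->workspace)
                            goto err;
            }
            return 0;

    err:
            for_each_possible_cpu(cpu)                      /* kfree(NULL) is a no-op */
                    kfree(per_cpu_ptr(streams, cpu)->workspace);
            free_percpu(streams);
            return -ENOMEM;
    }

The matching teardown is the same for_each_possible_cpu() loop followed by free_percpu(), which is what the squashfs_decompressor_destroy() hits above show.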
/Linux-v4.19/arch/alpha/boot/
  bootp.c
      72  struct percpu_struct * percpu;                              in pal_init() local
     102  percpu = (struct percpu_struct *)                           in pal_init()
     104  rev = percpu->pal_revision = percpu->palcode_avail[2];      in pal_init()

  main.c
      66  struct percpu_struct * percpu;                              in pal_init() local
      96  percpu = (struct percpu_struct *)                           in pal_init()
      98  rev = percpu->pal_revision = percpu->palcode_avail[2];      in pal_init()

  bootpz.c
     120  struct percpu_struct * percpu;                              in pal_init() local
     150  percpu = (struct percpu_struct *)                           in pal_init()
     152  rev = percpu->pal_revision = percpu->palcode_avail[2];      in pal_init()
/Linux-v4.19/include/asm-generic/
  vmlinux.lds.h
     823  *(.data..percpu..decrypted) \
     860  *(.data..percpu..first) \
     862  *(.data..percpu..page_aligned) \
     864  *(.data..percpu..read_mostly) \
     866  *(.data..percpu) \
     867  *(.data..percpu..shared_aligned) \
     897  .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
     900  . = __per_cpu_load + SIZEOF(.data..percpu);
     916  .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
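These are the input sections that statically allocated per-CPU variables are compiled into: the DEFINE_PER_CPU* macro family selects the sub-section, and the linker macros above gather them into the .data..percpu output section that the kernel replicates per CPU at boot. A small sketch with invented variable names, assuming a normal SMP build:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    /* Lands in .data..percpu */
    static DEFINE_PER_CPU(unsigned long, pkt_count);

    /* Lands in .data..percpu..read_mostly, kept apart from hot, frequently
     * written per-CPU data to avoid false sharing. */
    static DEFINE_PER_CPU_READ_MOSTLY(unsigned int, rx_queue_id);

    static void count_packet(void)
    {
            /* Safe against preemption and interrupts on this CPU. */
            this_cpu_inc(pkt_count);
    }

    static unsigned long total_packets(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu(pkt_count, cpu);
            return sum;
    }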
/Linux-v4.19/net/rds/
  ib_recv.c
     106  cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);    in rds_ib_recv_alloc_cache()
     107  if (!cache->percpu)                                                  in rds_ib_recv_alloc_cache()
     111  head = per_cpu_ptr(cache->percpu, cpu);                              in rds_ib_recv_alloc_cache()
     129  free_percpu(ic->i_cache_incs.percpu);                                in rds_ib_recv_alloc_caches()
     142  head = per_cpu_ptr(cache->percpu, cpu);                              in rds_ib_cache_splice_all_lists()
     165  free_percpu(ic->i_cache_incs.percpu);                                in rds_ib_recv_free_caches()
     175  free_percpu(ic->i_cache_frags.percpu);                               in rds_ib_recv_free_caches()
     476  chpfirst = __this_cpu_read(cache->percpu->first);                    in rds_ib_recv_cache_put()
     482  __this_cpu_write(cache->percpu->first, new_item);                    in rds_ib_recv_cache_put()
     483  __this_cpu_inc(cache->percpu->count);                                in rds_ib_recv_cache_put()
     [all …]
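Besides the allocate/tear-down pattern, the rds_ib_recv_cache_put() hits show the this-CPU accessors that operate on the current CPU's copy without first computing a pointer. A rough sketch of a per-CPU free list in that style, with invented names; the __this_cpu_*() forms assume the caller already prevents migration (here via preempt_disable()), otherwise the checked this_cpu_*() forms are the safer choice.

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    struct obj_node {
            struct obj_node *next;
    };

    struct obj_cache_head {
            struct obj_node *first;
            unsigned long count;
    };

    static struct obj_cache_head __percpu *obj_cache;   /* from alloc_percpu() */

    /* Push a node onto the current CPU's list. */
    static void obj_cache_put(struct obj_node *node)
    {
            preempt_disable();
            node->next = __this_cpu_read(obj_cache->first);
            __this_cpu_write(obj_cache->first, node);
            __this_cpu_inc(obj_cache->count);
            preempt_enable();
    }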
/Linux-v4.19/kernel/bpf/
  bpf_lru_list.h
      64  bool percpu;    member
      76  int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,

  bpf_lru_list.c
     499  if (lru->percpu)                                                     in bpf_lru_pop_free()
     555  if (lru->percpu)                                                     in bpf_lru_push_free()
     613  if (lru->percpu)                                                     in bpf_lru_populate()
     648  int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,  in bpf_lru_init() argument
     653  if (percpu) {                                                        in bpf_lru_init()
     683  lru->percpu = percpu;                                                in bpf_lru_init()
     693  if (lru->percpu)                                                     in bpf_lru_destroy()

  hashtab.c
     236  bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||        in htab_map_alloc_check() local
     270  if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))            in htab_map_alloc_check()
     300  bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||        in htab_map_alloc() local
     338  if (percpu)                                                          in htab_map_alloc()
     352  if (percpu)                                                          in htab_map_alloc()
     387  if (!percpu && !lru) {                                               in htab_map_alloc()
     712  static u32 htab_size_value(const struct bpf_htab *htab, bool percpu) in htab_size_value() argument
     716  if (percpu || fd_htab_map_needs_adjust(htab))                        in htab_size_value()
     723  bool percpu, bool onallcpus,                                         in alloc_htab_elem() argument
     726  u32 size = htab_size_value(htab, percpu);                            in alloc_htab_elem()
     [all …]

  arraymap.c
      59  bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;          in array_map_alloc_check() local
      66  (percpu && numa_node != NUMA_NO_NODE))                              in array_map_alloc_check()
      80  bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;          in array_map_alloc() local
     111  if (percpu)                                                          in array_map_alloc()
     120  if (percpu) {                                                        in array_map_alloc()
     143  if (percpu && bpf_array_alloc_percpu(array)) {                       in array_map_alloc()
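In hashtab.c and arraymap.c the flag marks the per-CPU map flavours (BPF_MAP_TYPE_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_ARRAY), where every value is stored once per possible CPU. From user space, a lookup on such a map fills one value slot per possible CPU, each slot padded to 8 bytes. A hedged user-space sketch using libbpf's bpf_map_lookup_elem() wrapper; MAX_CPUS is an assumed upper bound that must cover the machine's possible CPUs.

    #include <errno.h>
    #include <stdint.h>
    #include <bpf/bpf.h>            /* libbpf syscall wrappers */

    #define MAX_CPUS 256            /* assumption: >= number of possible CPUs */

    /* Sum a u64 counter stored in a BPF_MAP_TYPE_PERCPU_ARRAY. */
    static int read_percpu_counter(int map_fd, uint32_t key, uint64_t *sum)
    {
            uint64_t values[MAX_CPUS] = { 0 };   /* 8-byte values need no extra padding */
            int cpu;

            if (bpf_map_lookup_elem(map_fd, &key, values))
                    return -errno;

            *sum = 0;
            for (cpu = 0; cpu < MAX_CPUS; cpu++)
                    *sum += values[cpu];         /* unused trailing slots stay zero */
            return 0;
    }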
/Linux-v4.19/drivers/clocksource/
  qcom-timer.c
     162  bool percpu)                                                 in msm_timer_init() argument
     168  msm_timer_has_ppi = percpu;                                  in msm_timer_init()
     176  if (percpu)                                                  in msm_timer_init()

  timer-of.c
      39  of_irq->percpu ? free_percpu_irq(of_irq->irq, clkevt) :     in timer_of_irq_exit()
      81  ret = of_irq->percpu ?                                      in timer_of_irq_init()

  timer-of.h
      14  int percpu;    member
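In the clocksource code, percpu means a per-CPU interrupt (a PPI): timer_of_irq_init() picks request_percpu_irq()/free_percpu_irq() over plain request_irq()/free_irq() when the flag is set. One registration then covers every CPU, but delivery still has to be enabled on each CPU separately. A rough sketch of that API with an invented handler and per-CPU cookie:

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/percpu.h>

    struct tick_dev {
            int nr_ticks;
    };

    static struct tick_dev __percpu *tick_dev;

    static irqreturn_t tick_handler(int irq, void *dev_id)
    {
            struct tick_dev *td = dev_id;   /* this CPU's slot of the __percpu cookie */

            td->nr_ticks++;
            return IRQ_HANDLED;
    }

    static int tick_setup(unsigned int irq)
    {
            int ret;

            tick_dev = alloc_percpu(struct tick_dev);
            if (!tick_dev)
                    return -ENOMEM;

            ret = request_percpu_irq(irq, tick_handler, "tick", tick_dev);
            if (ret) {
                    free_percpu(tick_dev);
                    return ret;
            }

            /* Only enables delivery on the CPU this runs on; it must be
             * repeated on each CPU, e.g. from a CPU-hotplug startup callback. */
            enable_percpu_irq(irq, IRQ_TYPE_NONE);
            return 0;
    }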
/Linux-v4.19/arch/sparc/kernel/
  sun4m_irq.c
     108  bool percpu;    member
     201  if (handler_data->percpu) {                                  in sun4m_mask_irq()
     220  if (handler_data->percpu) {                                  in sun4m_unmask_irq()
     279  handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD;    in sun4m_build_device_irq()
/Linux-v4.19/arch/x86/kernel/
  vmlinux.lds.S
     103  percpu PT_LOAD FLAGS(6);        /* RW_ */
     237  PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
     238  ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
/Linux-v4.19/drivers/md/
  raid5.c
    1398  struct raid5_percpu *percpu, int i)                                     in to_addr_conv() argument
    1402  addr = flex_array_get(percpu->scribble, i);                             in to_addr_conv()
    1407  static struct page **to_addr_page(struct raid5_percpu *percpu, int i)   in to_addr_page() argument
    1411  addr = flex_array_get(percpu->scribble, i);                             in to_addr_page()
    1416  ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)   in ops_run_compute5() argument
    1419  struct page **xor_srcs = to_addr_page(percpu, 0);                       in ops_run_compute5()
    1441  ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));                 in ops_run_compute5()
    1498  ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
    1501  struct page **blocks = to_addr_page(percpu, 0);                         in ops_run_compute6_1()
    1535  to_addr_conv(sh, percpu, 0));                                           in ops_run_compute6_1()
    [all …]
/Linux-v4.19/Documentation/RCU/
  rcuref.txt
       4  Please note that the percpu-ref feature is likely your first
       6  include/linux/percpu-refcount.h for more information.  However, in
       7  those unusual cases where percpu-ref would consume too much memory,
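As the excerpt says, percpu-ref (include/linux/percpu-refcount.h) is usually the right tool when a reference count is hot enough that a shared atomic would bounce between CPUs. A minimal lifecycle sketch with an invented my_obj type: gets and puts stay per-CPU while the ref is live, percpu_ref_kill() drops the initial reference and switches the ref to a shared atomic count, and the release callback runs once the count reaches zero.

    #include <linux/percpu-refcount.h>
    #include <linux/slab.h>

    struct my_obj {
            struct percpu_ref ref;
            /* ... payload ... */
    };

    static void my_obj_release(struct percpu_ref *ref)
    {
            struct my_obj *obj = container_of(ref, struct my_obj, ref);

            percpu_ref_exit(&obj->ref);     /* frees the per-CPU counters */
            kfree(obj);
    }

    static struct my_obj *my_obj_create(void)
    {
            struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (!obj)
                    return NULL;
            if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
                    kfree(obj);
                    return NULL;
            }
            return obj;                     /* holds the initial reference */
    }

    /* Fast paths: cheap per-CPU increments/decrements while the ref is live. */
    static void my_obj_get(struct my_obj *obj) { percpu_ref_get(&obj->ref); }
    static void my_obj_put(struct my_obj *obj) { percpu_ref_put(&obj->ref); }

    static void my_obj_destroy(struct my_obj *obj)
    {
            percpu_ref_kill(&obj->ref);     /* release runs when the count hits zero */
    }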
/Linux-v4.19/arch/ia64/kernel/
  vmlinux.lds.S
      17  percpu PT_LOAD;
     178  PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
/Linux-v4.19/kernel/sched/
  cpuacct.c
     207  u64 percpu;                                                  in __cpuacct_percpu_seq_show() local
     211  percpu = cpuacct_cpuusage_read(ca, i, index);                in __cpuacct_percpu_seq_show()
     212  seq_printf(m, "%llu ", (unsigned long long) percpu);         in __cpuacct_percpu_seq_show()
/Linux-v4.19/arch/powerpc/include/asm/nohash/
  pgtable.h
     145  pte_t *ptep, pte_t pte, int percpu)                                          in __set_pte_at() argument
     152  if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {   in __set_pte_at()
/Linux-v4.19/arch/powerpc/include/asm/book3s/32/
  pgtable.h
     405  pte_t *ptep, pte_t pte, int percpu)                          in __set_pte_at() argument
     414  if (percpu)                                                  in __set_pte_at()
     429  if (percpu) {                                                in __set_pte_at()
/Linux-v4.19/include/linux/
  stm.h
     118  unsigned int percpu;    member
/Linux-v4.19/mm/
  Makefile
      39  mm_init.o mmu_context.o percpu.o slab_common.o \
     105  obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
/Linux-v4.19/arch/sh/include/asm/
  Kbuild
      15  generic-y += percpu.h
/Linux-v4.19/arch/mips/include/asm/
  Kbuild
      13  generic-y += percpu.h