numa_node references in Linux v5.10

/Linux-v5.10/include/linux/

topology.h
      82  DECLARE_PER_CPU(int, numa_node);
      88  return raw_cpu_read(numa_node);    in numa_node_id()
      95  return per_cpu(numa_node, cpu);    in cpu_to_node()
     102  this_cpu_write(numa_node, node);    in set_numa_node()
     109  per_cpu(numa_node, cpu) = node;    in set_cpu_numa_node()

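The topology.h hits above are the generic per-CPU NUMA accessors: numa_node_id() reads the current CPU's node, cpu_to_node() reads another CPU's, and the two setters write the per-CPU numa_node variable. A minimal sketch of how callers typically pair them with the *_node allocators; the helper name alloc_buffer_near_cpu is illustrative, not from the tree:

#include <linux/topology.h>
#include <linux/slab.h>

/* Illustrative helper: allocate memory on the node backing @cpu,
 * or on the calling CPU's node when no CPU is specified.
 */
static void *alloc_buffer_near_cpu(size_t size, int cpu)
{
	/* cpu_to_node() is per_cpu(numa_node, cpu) and numa_node_id()
	 * is raw_cpu_read(numa_node), per the matches above. */
	int node = (cpu >= 0) ? cpu_to_node(cpu) : numa_node_id();

	return kmalloc_node(size, GFP_KERNEL, node);
}
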
/Linux-v5.10/drivers/dax/

kmem.c
      49  int numa_node;    in dev_dax_kmem_probe() local
      57  numa_node = dev_dax->target_node;    in dev_dax_kmem_probe()
      58  if (numa_node < 0) {    in dev_dax_kmem_probe()
      60  numa_node);    in dev_dax_kmem_probe()
     111  rc = add_memory_driver_managed(numa_node, range.start,    in dev_dax_kmem_probe()

/Linux-v5.10/kernel/bpf/

ringbuf.c
      61  static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)    in bpf_ringbuf_area_alloc() argument
      92  pages = vmalloc_node(array_size, numa_node);    in bpf_ringbuf_area_alloc()
      94  pages = kmalloc_node(array_size, flags, numa_node);    in bpf_ringbuf_area_alloc()
      99  page = alloc_pages_node(numa_node, flags, 0);    in bpf_ringbuf_area_alloc()
     131  static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)    in bpf_ringbuf_alloc() argument
     135  rb = bpf_ringbuf_area_alloc(data_sz, numa_node);    in bpf_ringbuf_alloc()
     183  rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);    in ringbuf_map_alloc()

local_storage.c
     170  map->numa_node);    in cgroup_storage_update_elem()
     288  int numa_node = bpf_map_attr_numa_node(attr);    in cgroup_storage_map_alloc() local
     316  __GFP_ZERO | GFP_USER, numa_node);    in cgroup_storage_map_alloc()
     515  __GFP_ZERO | GFP_USER, map->numa_node);    in bpf_cgroup_storage_alloc()
     522  storage->buf = kmalloc_node(size, flags, map->numa_node);    in bpf_cgroup_storage_alloc()

devmap.c
      90  int numa_node)    in dev_map_create_hash() argument
      95  hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);    in dev_map_create_hash()
     150  dtab->map.numa_node);    in dev_map_init_map()
     158  dtab->map.numa_node);    in dev_map_init_map()
     606  dtab->map.numa_node);    in __dev_map_alloc_node()

queue_stack_maps.c
      69  int ret, numa_node = bpf_map_attr_numa_node(attr);    in queue_stack_map_alloc() local
      81  qs = bpf_map_area_alloc(queue_size, numa_node);    in queue_stack_map_alloc()

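All four BPF files above follow one pattern: the map_alloc callback derives a node with bpf_map_attr_numa_node() (NUMA_NO_NODE unless userspace set BPF_F_NUMA_NODE in map_flags) and hands it to bpf_map_area_alloc() or a *_node allocator. A hedged sketch of that pattern; example_map_alloc and the sizing are illustrative, not a real map type:

#include <linux/bpf.h>
#include <linux/err.h>

/* Illustrative map_alloc callback showing the shared NUMA plumbing;
 * real map types allocate their own container struct embedding bpf_map. */
static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	/* NUMA_NO_NODE unless BPF_F_NUMA_NODE was set in map_flags. */
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;

	/* bpf_map_area_alloc() prefers @numa_node and falls back to
	 * vmalloc for large areas, much like ringbuf.c's fallbacks. */
	map = bpf_map_area_alloc(sizeof(*map), numa_node);
	if (!map)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(map, attr);
	return map;
}
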
/Linux-v5.10/drivers/virt/nitro_enclaves/

ne_misc_dev.c
     123  int numa_node;    member
     175  int numa_node = -1;    in ne_setup_cpu_pool() local
     217  if (numa_node < 0) {    in ne_setup_cpu_pool()
     218  numa_node = cpu_to_node(cpu);    in ne_setup_cpu_pool()
     219  if (numa_node < 0) {    in ne_setup_cpu_pool()
     221  ne_misc_dev.name, numa_node);    in ne_setup_cpu_pool()
     228  if (numa_node != cpu_to_node(cpu)) {    in ne_setup_cpu_pool()
     342  ne_cpu_pool.numa_node = numa_node;    in ne_setup_cpu_pool()
     362  ne_cpu_pool.numa_node = -1;    in ne_setup_cpu_pool()
     405  ne_cpu_pool.numa_node = -1;    in ne_teardown_cpu_pool()
     [all …]

ne_misc_dev.h
      77  int numa_node;    member

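ne_setup_cpu_pool() records the pool's node from the first CPU and then insists every further CPU maps to the same node via cpu_to_node(). An illustrative re-statement of that check; the function name and return convention are assumptions, not the driver's actual interface:

#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/numa.h>
#include <linux/errno.h>

/* Return the single NUMA node backing @pool, or -EINVAL if the
 * pool mixes nodes; mirrors the ne_setup_cpu_pool() validation. */
static int pool_numa_node(const struct cpumask *pool)
{
	int numa_node = NUMA_NO_NODE;
	unsigned int cpu;

	for_each_cpu(cpu, pool) {
		if (numa_node == NUMA_NO_NODE)
			numa_node = cpu_to_node(cpu);	/* first CPU picks the node */
		else if (numa_node != cpu_to_node(cpu))
			return -EINVAL;			/* mixed-node pool rejected */
	}

	return numa_node;
}
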
/Linux-v5.10/drivers/nvdimm/

of_pmem.c
      57  ndr_desc.numa_node = dev_to_node(&pdev->dev);    in of_pmem_region_probe()
      58  ndr_desc.target_node = ndr_desc.numa_node;    in of_pmem_region_probe()

e820.c
      28  ndr_desc.numa_node = numa_map_to_online_node(nid);    in e820_register_one()

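Both probes stamp the region descriptor with a device-derived node; e820.c additionally runs the firmware node through numa_map_to_online_node() so a memory-less or offline node is folded onto a nearby online one. A small sketch combining the two steps; the helper is hypothetical, while nd_region_desc is the real libnvdimm descriptor:

#include <linux/libnvdimm.h>
#include <linux/numa.h>
#include <linux/platform_device.h>

/* Hypothetical fragment: derive the region's node from the device,
 * then make sure it refers to an online node before registration. */
static void set_region_node(struct platform_device *pdev,
			    struct nd_region_desc *desc)
{
	desc->numa_node = numa_map_to_online_node(dev_to_node(&pdev->dev));
	desc->target_node = desc->numa_node;
}
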
/Linux-v5.10/tools/perf/util/

env.h
      27  struct numa_node {    struct
      77  struct numa_node *numa_nodes;

/Linux-v5.10/arch/sparc/kernel/

pci.c
     254  int numa_node)    in pci_init_dev_archdata() argument
     260  sd->numa_node = numa_node;    in pci_init_dev_archdata()
     279  pbm->numa_node);    in of_create_pci_dev()
     283  sd->numa_node = pbm->numa_node;    in of_create_pci_dev()
     914  return pbm->numa_node;    in pcibus_to_node()
    1028  psd->numa_node);    in pcibios_add_device()

of_device_common.c
      69  op->dev.archdata.numa_node = bus_sd->numa_node;    in of_propagate_archdata()

iommu.c
      95  int numa_node)    in iommu_table_init() argument
     111  iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);    in iommu_table_init()
     122  page = alloc_pages_node(numa_node, GFP_KERNEL, 0);    in iommu_table_init()
     133  page = alloc_pages_node(numa_node, GFP_KERNEL, order);    in iommu_table_init()
     213  nid = dev->archdata.numa_node;    in dma_4u_alloc_coherent()

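iommu_table_init() keeps the IOMMU's bitmap and page tables node-local by pairing kzalloc_node() with alloc_pages_node(). A minimal sketch of the page-allocation half; the function name is illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate 2^order pages on @numa_node and return their kernel
 * virtual address, as iommu_table_init() does for its tables. */
static void *alloc_table_on_node(int numa_node, unsigned int order)
{
	struct page *page = alloc_pages_node(numa_node, GFP_KERNEL, order);

	return page ? page_address(page) : NULL;
}
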
/Linux-v5.10/net/xdp/

xskmap.c
      77  int err, numa_node;    in xsk_map_alloc() local
      89  numa_node = bpf_map_attr_numa_node(attr);    in xsk_map_alloc()
      96  m = bpf_map_area_alloc(size, numa_node);    in xsk_map_alloc()

/Linux-v5.10/drivers/hv/

channel_mgmt.c
     678  int numa_node;    in init_vp_index() local
     698  numa_node = next_numa_node_id++;    in init_vp_index()
     699  if (numa_node == nr_node_ids) {    in init_vp_index()
     703  if (cpumask_empty(cpumask_of_node(numa_node)))    in init_vp_index()
     707  alloced_mask = &hv_context.hv_numa_map[numa_node];    in init_vp_index()
     710  cpumask_weight(cpumask_of_node(numa_node))) {    in init_vp_index()
     718  cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));    in init_vp_index()

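init_vp_index() spreads channel interrupts by rotating a node counter, wrapping at nr_node_ids and skipping nodes that own no CPUs. A hedged sketch of just that rotation; next_node_with_cpus is illustrative and, like the original, presumes at least one node has CPUs:

#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/topology.h>

/* Round-robin node selector: wrap at nr_node_ids, skip CPU-less nodes. */
static int next_node_with_cpus(void)
{
	static int next_numa_node_id;
	int numa_node;

	for (;;) {
		numa_node = next_numa_node_id++;
		if (numa_node >= nr_node_ids) {		/* wrap around */
			next_numa_node_id = 0;
			continue;
		}
		if (!cpumask_empty(cpumask_of_node(numa_node)))
			return numa_node;		/* node has CPUs */
	}
}
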
/Linux-v5.10/arch/sparc/include/asm/

device.h
      18  int numa_node;    member

iommu_64.h
      91  int numa_node);

/Linux-v5.10/samples/bpf/

bpf_load.c
     268  int i, numa_node;    in load_maps() local
     280  numa_node = maps[i].def.map_flags & BPF_F_NUMA_NODE ?    in load_maps()
     281  maps[i].def.numa_node : -1;    in load_maps()
     293  numa_node);    in load_maps()
     301  numa_node);    in load_maps()

bpf_load.h
      17  unsigned int numa_node;    member

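load_maps() only passes a node when the map definition carries BPF_F_NUMA_NODE, using -1 otherwise. From userspace, the libbpf bundled with v5.10 exposes the same knob as bpf_create_map_node(), which sets BPF_F_NUMA_NODE itself when the node is non-negative. A sketch with arbitrary example parameters:

#include <bpf/bpf.h>

/* Create an array map whose kernel-side memory is placed on @node
 * (pass -1 to leave placement to the kernel). */
int create_array_on_node(int node)
{
	return bpf_create_map_node(BPF_MAP_TYPE_ARRAY, "example_map",
				   sizeof(int),		/* key size */
				   sizeof(long),	/* value size */
				   1024,		/* max entries */
				   0,			/* extra map_flags */
				   node);
}
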
/Linux-v5.10/drivers/nvme/target/

loop.c
     205  unsigned int numa_node)    in nvme_loop_init_request() argument
     343  ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;    in nvme_loop_configure_admin_queue()
     514  ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;    in nvme_loop_create_io_queues()

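Both tag sets inherit the controller's node so blk-mq allocates its tags and requests there. A minimal sketch of that propagation; init_tag_set and @ctrl_node are illustrative stand-ins for the driver's setup path and ctrl->ctrl.numa_node:

#include <linux/blk-mq.h>

/* Point the tag set at the controller's node before allocation. */
static int init_tag_set(struct blk_mq_tag_set *set, int ctrl_node)
{
	set->numa_node = ctrl_node;	/* node for tag/request memory */
	return blk_mq_alloc_tag_set(set);
}
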
/Linux-v5.10/drivers/net/ethernet/amazon/ena/

ena_eth_com.h
     190  u8 numa_node)    in ena_com_update_numa_node() argument
     197  numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)    in ena_com_update_numa_node()

/Linux-v5.10/include/uapi/rdma/hfi/

hfi1_ioctl.h
      90  __u16 numa_node;  /* NUMA node of the assigned device */    member

/Linux-v5.10/drivers/dax/hmem/

device.c
      43  pdev->dev.numa_node = numa_map_to_online_node(target_nid);    in hmem_register_device()

/Linux-v5.10/tools/perf/include/bpf/

bpf.h
      18  unsigned int numa_node;    member