Lines Matching full:nodes

15 * interleave     Allocate memory interleaved over a set of nodes,
22 * bind           Only allocate memory on a specific set of nodes,
26 *                the allocation to memory nodes instead
34 * preferred many Try a set of nodes first before normal fallback. This is
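These header-comment fragments describe the policy modes that userspace selects through set_mempolicy(2). A minimal sketch of driving them, assuming a machine with at least two NUMA nodes and libnuma's <numaif.h> for the syscall wrappers (node numbers are illustrative; link with -lnuma):

    /* Sketch: exercising the policy modes described above. */
    #include <numaif.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long mask = (1UL << 0) | (1UL << 1);   /* nodes 0 and 1 */

        /* interleave: spread new allocations round-robin over the set */
        if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)))
            perror("MPOL_INTERLEAVE");

        /* bind: allocate only from the set, no fallback */
        if (set_mempolicy(MPOL_BIND, &mask, 8 * sizeof(mask)))
            perror("MPOL_BIND");

        /* preferred many: try the set first, then fall back normally
         * (Linux >= 5.15; older numaif.h may lack the constant) */
        if (set_mempolicy(MPOL_PREFERRED_MANY, &mask, 8 * sizeof(mask)))
            perror("MPOL_PREFERRED_MANY");

        return set_mempolicy(MPOL_DEFAULT, NULL, 0);
    }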
178 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
179 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
195 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes) in mpol_new_nodemask() argument
197 if (nodes_empty(*nodes)) in mpol_new_nodemask()
199 pol->nodes = *nodes; in mpol_new_nodemask()
203 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) in mpol_new_preferred() argument
205 if (nodes_empty(*nodes)) in mpol_new_preferred()
208 nodes_clear(pol->nodes); in mpol_new_preferred()
209 node_set(first_node(*nodes), pol->nodes); in mpol_new_preferred()
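mpol_new_nodemask() and mpol_new_preferred() are create callbacks of the mempolicy_operations struct at 178-179: bind and interleave keep the whole mask, preferred keeps only its first node. A simplified standalone model of that dispatch, with a 64-bit stand-in for nodemask_t and otherwise hypothetical names:

    /* Simplified model of the per-mode create dispatch in mempolicy.c;
     * only the two-callback shape is taken from the kernel. */
    #include <stdio.h>

    typedef unsigned long nodemask_t;       /* stand-in for the kernel type */

    struct mempolicy { int mode; nodemask_t nodes; };

    struct mempolicy_operations {
        int  (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
    };

    /* bind/interleave style: take the whole mask, reject an empty one */
    static int create_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
    {
        if (*nodes == 0)
            return -1;                      /* -EINVAL in the kernel */
        pol->nodes = *nodes;
        return 0;
    }

    /* preferred style: keep only the first (lowest) node of the mask */
    static int create_preferred(struct mempolicy *pol, const nodemask_t *nodes)
    {
        if (*nodes == 0)
            return -1;
        pol->nodes = *nodes & -*nodes;      /* lowest set bit */
        return 0;
    }

    static const struct mempolicy_operations mpol_ops[] = {
        { .create = create_nodemask },      /* bind / interleave */
        { .create = create_preferred },     /* preferred */
    };

    int main(void)
    {
        struct mempolicy pol = { .mode = 1 };
        nodemask_t want = 0xC;              /* nodes 2 and 3 */

        mpol_ops[pol.mode].create(&pol, &want);
        printf("%#lx\n", pol.nodes);        /* 0x4: node 2 only */
        return 0;
    }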
215 * any, for the new policy. mpol_new() has already validated the nodes
222 const nodemask_t *nodes, struct nodemask_scratch *nsc) in mpol_set_nodemask() argument
238 VM_BUG_ON(!nodes); in mpol_set_nodemask()
241 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); in mpol_set_nodemask()
243 nodes_and(nsc->mask2, *nodes, nsc->mask1); in mpol_set_nodemask()
246 pol->w.user_nodemask = *nodes; in mpol_set_nodemask()
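mpol_set_nodemask() is where the user's mask meets the task's cpuset: with MPOL_F_RELATIVE_NODES the mask is remapped relative to mems_allowed (mask1), otherwise it is intersected with it, and the untouched input is saved in w.user_nodemask for later rebinds. From userspace the two behaviours are chosen by OR-ing a mode flag into set_mempolicy(2)/mbind(2); a hedged sketch:

    /* Sketch: static vs relative nodemask semantics. */
    #include <numaif.h>

    int main(void)
    {
        unsigned long mask = 0x3;   /* bits 0-1 */

        /* static: bits are physical node numbers and are never remapped
         * when the task's cpuset changes */
        set_mempolicy(MPOL_BIND | MPOL_F_STATIC_NODES,
                      &mask, 8 * sizeof(mask));

        /* relative: bit n means "the n-th node of the current
         * mems_allowed"; re-derived on every cpuset rebind */
        return set_mempolicy(MPOL_BIND | MPOL_F_RELATIVE_NODES,
                             &mask, 8 * sizeof(mask));
    }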
256 * initialization. You must invoke mpol_set_nodemask() to set nodes.
259 nodemask_t *nodes) in mpol_new() argument
263 pr_debug("setting mode %d flags %d nodes[0] %lx\n", in mpol_new()
264 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); in mpol_new()
267 if (nodes && !nodes_empty(*nodes)) in mpol_new()
271 VM_BUG_ON(!nodes); in mpol_new()
279 if (nodes_empty(*nodes)) { in mpol_new()
287 if (!nodes_empty(*nodes) || in mpol_new()
291 } else if (nodes_empty(*nodes)) in mpol_new()
311 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) in mpol_rebind_default() argument
315 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) in mpol_rebind_nodemask() argument
320 nodes_and(tmp, pol->w.user_nodemask, *nodes); in mpol_rebind_nodemask()
322 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); in mpol_rebind_nodemask()
324 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed, in mpol_rebind_nodemask()
325 *nodes); in mpol_rebind_nodemask()
326 pol->w.cpuset_mems_allowed = *nodes; in mpol_rebind_nodemask()
330 tmp = *nodes; in mpol_rebind_nodemask()
332 pol->nodes = tmp; in mpol_rebind_nodemask()
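mpol_rebind_nodemask() handles a cpuset move in three ways: static masks are re-intersected with the new allowed set, relative masks are re-derived from the saved user mask, and plain masks are remapped ordinally from the old allowed set to the new one. A simplified model of that nodes_remap() step on 64-bit masks (the kernel's bitmap_remap() also defines behaviour for bits outside the old mask, which this sketch drops):

    /* Model: the k-th set bit of the old cpuset mask becomes the
     * (k mod weight)-th set bit of the new one. */
    #include <stdio.h>
    #include <stdint.h>

    static int nth_set_bit(uint64_t mask, int n)
    {
        for (int pos = 0; pos < 64; pos++)
            if (mask & (1ULL << pos) && n-- == 0)
                return pos;
        return -1;
    }

    static uint64_t remap(uint64_t src, uint64_t old, uint64_t new)
    {
        int w = __builtin_popcountll(new);
        uint64_t dst = 0;
        int ord = 0;

        if (w == 0)
            return src;             /* nothing to map onto */
        for (int pos = 0; pos < 64; pos++) {
            if (!(old & (1ULL << pos)))
                continue;
            if (src & (1ULL << pos))
                dst |= 1ULL << nth_set_bit(new, ord % w);
            ord++;
        }
        return dst;
    }

    int main(void)
    {
        /* policy used {1,2}; cpuset moved from {0-3} to {4-7} */
        printf("%#llx\n", (unsigned long long)remap(0x6, 0xF, 0xF0));
        /* prints 0x60, i.e. nodes {5,6} */
        return 0;
    }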
336 const nodemask_t *nodes) in mpol_rebind_preferred() argument
338 pol->w.cpuset_mems_allowed = *nodes; in mpol_rebind_preferred()
342 * mpol_rebind_policy - Migrate a policy to a different set of nodes
710 * If pages found in a given range are on a set of nodes (determined by
711 * @nodes and @flags), they are isolated and queued to the pagelist, which is
724 nodemask_t *nodes, unsigned long flags, in queue_pages_range() argument
731 .nmask = nodes, in queue_pages_range()
844 nodemask_t *nodes) in do_set_mempolicy() argument
853 new = mpol_new(mode, flags, nodes); in do_set_mempolicy()
859 ret = mpol_set_nodemask(new, nodes, scratch); in do_set_mempolicy()
882 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) in get_policy_nodemask() argument
884 nodes_clear(*nodes); in get_policy_nodemask()
893 *nodes = p->nodes; in get_policy_nodemask()
981 *policy = next_node_in(current->il_prev, pol->nodes); in do_get_mempolicy()
1117 * This lets us pick a pair of nodes to migrate between, such that in do_migrate_pages()
1146 * However, if the number of source nodes is not equal to in do_migrate_pages()
1147 * the number of destination nodes, we cannot preserve in do_migrate_pages()
1167 /* dest not in the remaining source nodes? */ in do_migrate_pages()
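do_migrate_pages() backs the migrate_pages(2) syscall; the pair-picking described above tries to preserve the relative order of nodes when the source and destination sets have equal weight. A minimal userspace sketch, assuming nodes 0 and 1 exist (pid 0 means the calling process):

    /* Sketch: move this process's pages from node 0 to node 1. */
    #include <numaif.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long from = 1UL << 0;
        unsigned long to   = 1UL << 1;

        long left = migrate_pages(0, 8 * sizeof(from), &from, &to);
        if (left < 0)
            perror("migrate_pages");
        else
            printf("%ld pages could not be moved\n", left);
        return 0;
    }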
1293 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", in do_mbind()
1379 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, in get_nodes() argument
1383 nodes_clear(*nodes); in get_nodes()
1390 * When the user specified more nodes than supported, just check in get_nodes()
1411 return get_bitmap(nodes_addr(*nodes), nmask, maxnode); in get_nodes()
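get_nodes() copies a maxnode-bit user bitmap and, per the comment above, merely checks that bits beyond what the kernel supports are clear. On the user side the mask is an array of unsigned long; a small hypothetical helper for masks wider than one word:

    /* Sketch: building the multi-word bitmap that get_nodes() consumes. */
    #include <numaif.h>
    #include <limits.h>
    #include <string.h>

    #define NODE_BITS  (sizeof(unsigned long) * CHAR_BIT)
    #define NODE_WORDS 4                    /* room for 256 node bits */

    static void node_mask_set(unsigned long *mask, unsigned node)
    {
        mask[node / NODE_BITS] |= 1UL << (node % NODE_BITS);
    }

    int main(void)
    {
        unsigned long mask[NODE_WORDS];

        memset(mask, 0, sizeof(mask));
        node_mask_set(mask, 0);
        node_mask_set(mask, 1);     /* node 65 would land in word two */
        /* maxnode counts bits; bits past MAX_NUMNODES must stay 0 */
        return set_mempolicy(MPOL_INTERLEAVE, mask, NODE_WORDS * NODE_BITS);
    }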
1416 nodemask_t *nodes) in copy_nodes_to_user() argument
1436 nodes_addr(*nodes), maxnode); in copy_nodes_to_user()
1438 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; in copy_nodes_to_user()
1464 nodemask_t nodes; in kernel_mbind() local
1473 err = get_nodes(&nodes, nmask, maxnode); in kernel_mbind()
1477 return do_mbind(start, len, lmode, mode_flags, &nodes, flags); in kernel_mbind()
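kernel_mbind() validates the mode and mask, then hands them to do_mbind(), which relies on queue_pages_range() above to isolate pages already outside the requested set when MPOL_MF_MOVE is given. A hedged userspace sketch:

    /* Sketch: bind one anonymous mapping to node 0 and migrate any
     * pages already faulted in elsewhere. */
    #include <numaif.h>
    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
        size_t len = 1 << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        unsigned long mask = 1UL << 0;      /* node 0 */

        if (p == MAP_FAILED)
            return 1;
        if (mbind(p, len, MPOL_BIND, &mask, 8 * sizeof(mask),
                  MPOL_MF_MOVE | MPOL_MF_STRICT))
            perror("mbind");
        munmap(p, len);
        return 0;
    }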
1492 nodemask_t nodes; in kernel_set_mempolicy() local
1500 err = get_nodes(&nodes, nmask, maxnode); in kernel_set_mempolicy()
1504 return do_set_mempolicy(lmode, mode_flags, &nodes); in kernel_set_mempolicy()
1563 /* Is the user allowed to access the target nodes? */ in kernel_migrate_pages()
1618 nodemask_t nodes; in kernel_get_mempolicy() local
1625 err = do_get_mempolicy(&pval, &nodes, addr, flags); in kernel_get_mempolicy()
1634 err = copy_nodes_to_user(nmask, maxnode, &nodes); in kernel_get_mempolicy()
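kernel_get_mempolicy() is the read side: do_get_mempolicy() fills in the mode and nodemask, and copy_nodes_to_user() writes the bitmap back out. A sketch of querying the policy that covers an address (MPOL_F_ADDR selects the VMA policy; OR-ing in MPOL_F_NODE would return a node number instead, for interleave the next one, as in the next_node_in() line above):

    /* Sketch: report the policy governing a heap address. */
    #include <numaif.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int mode;
        unsigned long mask = 0;
        void *p = malloc(4096);

        if (get_mempolicy(&mode, &mask, 8 * sizeof(mask), p, MPOL_F_ADDR))
            perror("get_mempolicy");
        else
            printf("mode=%d nodes=%#lx\n", mode, mask);
        free(p);
        return 0;
    }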
1751 * if policy->nodes has movable memory only, in apply_policy_zone()
1754 * policy->nodes intersects with node_states[N_MEMORY]. in apply_policy_zone()
1756 * policy->nodes has movable memory only. in apply_policy_zone()
1758 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) in apply_policy_zone()
1765 * Return a nodemask representing a mempolicy for filtering nodes for
1775 cpuset_nodemask_valid_mems_allowed(&policy->nodes)) in policy_nodemask()
1776 return &policy->nodes; in policy_nodemask()
1779 return &policy->nodes; in policy_nodemask()
1794 nd = first_node(policy->nodes); in policy_node()
1813 next = next_node_in(me->il_prev, policy->nodes); in interleave_nodes()
1837 return first_node(policy->nodes); in mempolicy_slab_node()
1855 &policy->nodes); in mempolicy_slab_node()
1868 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1869 * number of present nodes.
1873 nodemask_t nodemask = pol->nodes; in offset_il_node()
1881 * Between first_node() and next_node(), pol->nodes could be changed in offset_il_node()
1882 * by other threads. So we copy pol->nodes onto the local stack. in offset_il_node()
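offset_il_node() maps an index n to the n-th set bit of the interleave mask, wrapping modulo the mask's weight; the stack-local snapshot is what makes the walk safe against concurrent rebinds. A standalone model on a 64-bit mask:

    /* Model: pick the (n mod weight)-th set bit of a mask snapshot. */
    #include <stdio.h>
    #include <stdint.h>

    static int offset_il_node(uint64_t nodes, unsigned n)
    {
        int w = __builtin_popcountll(nodes);    /* nodes_weight() */

        if (w == 0)
            return -1;      /* the kernel falls back to numa_node_id() */
        n %= w;
        for (int nid = 0; ; nid++)
            if (nodes & (1ULL << nid) && n-- == 0)
                return nid;
    }

    int main(void)
    {
        uint64_t mask = (1ULL << 1) | (1ULL << 3) | (1ULL << 5);

        for (unsigned n = 0; n < 5; n++)
            printf("n=%u -> node %d\n", n, offset_il_node(mask, n));
        /* 1, 3, 5, 1, 3 */
        return 0;
    }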
1950 *nodemask = &(*mpol)->nodes; in huge_node()
1985 *mask = mempolicy->nodes; in init_nodemask_of_mempolicy()
2007 * memory allocated from all nodes in the system.
2023 ret = nodes_intersects(mempolicy->nodes, *mask); in mempolicy_in_oom_domain()
2056 * preferred nodes but skip the direct reclaim and allow the in alloc_pages_preferred_many()
2058 * nodes in the system. in alloc_pages_preferred_many()
2062 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); in alloc_pages_preferred_many()
2117 * node and don't fall back to other nodes, as the cost of in alloc_pages_vma()
2124 hpage_node = first_node(pol->nodes); in alloc_pages_vma()
2262 return !!nodes_equal(a->nodes, b->nodes); in __mpol_equal()
2402 if (node_isset(curnid, pol->nodes)) in mpol_misplaced()
2404 polnid = first_node(pol->nodes); in mpol_misplaced()
2412 /* Optimize placement among multiple nodes via NUMA balancing */ in mpol_misplaced()
2414 if (node_isset(thisnid, pol->nodes)) in mpol_misplaced()
2424 * If no allowed nodes, use current [!misplaced]. in mpol_misplaced()
2426 if (node_isset(curnid, pol->nodes)) in mpol_misplaced()
2431 &pol->nodes); in mpol_misplaced()
2634 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); in mpol_set_shared_policy()
2732 .nodes = nodemask_of_node(nid), in numa_policy_init()
2738 * enabled across suitably sized nodes (default is >= 16MB), or in numa_policy_init()
2802 nodemask_t nodes; in mpol_parse_str() local
2813 if (nodelist_parse(nodelist, nodes)) in mpol_parse_str()
2815 if (!nodes_subset(nodes, node_states[N_MEMORY])) in mpol_parse_str()
2818 nodes_clear(nodes); in mpol_parse_str()
2828 * we use first_node(nodes) to grab a single node, so here in mpol_parse_str()
2829 * nodelist (or nodes) cannot be empty. in mpol_parse_str()
2837 if (nodes_empty(nodes)) in mpol_parse_str()
2843 * Default to online nodes with memory if no nodelist in mpol_parse_str()
2846 nodes = node_states[N_MEMORY]; in mpol_parse_str()
2885 new = mpol_new(mode, mode_flags, &nodes); in mpol_parse_str()
2890 * Save nodes for mpol_to_str() to show the tmpfs mount options in mpol_parse_str()
2894 new->nodes = nodes; in mpol_parse_str()
2896 nodes_clear(new->nodes); in mpol_parse_str()
2897 node_set(first_node(nodes), new->nodes); in mpol_parse_str()
2903 * Save nodes for contextualization: this will be used to "clone" in mpol_parse_str()
2906 new->w.user_nodemask = nodes; in mpol_parse_str()
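mpol_parse_str() is the parser behind the tmpfs mpol= mount option, and the w.user_nodemask saved here is what lets such a mount be recontextualized when cpusets change. A hedged sketch via mount(2); the mount point is illustrative and the call needs CAP_SYS_ADMIN:

    /* Sketch: mount tmpfs with a policy string mpol_parse_str() accepts,
     * e.g. "interleave:0-3", "prefer:2", or "bind=static:0,2". */
    #include <sys/mount.h>
    #include <stdio.h>

    int main(void)
    {
        if (mount("tmpfs", "/mnt/interleaved", "tmpfs", 0,
                  "size=64m,mpol=interleave:0-3"))
            perror("mount");
        return 0;
    }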
2935 nodemask_t nodes = NODE_MASK_NONE; in mpol_to_str() local
2952 nodes = pol->nodes; in mpol_to_str()
2974 if (!nodes_empty(nodes)) in mpol_to_str()
2976 nodemask_pr_args(&nodes)); in mpol_to_str()
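mpol_to_str() is the inverse conversion, producing strings such as "interleave:0-3" or "bind=static:0,2"; they appear in /proc/<pid>/numa_maps and in tmpfs mount options. A short sketch printing them for the current task:

    /* Sketch: dump the per-mapping policy strings for this process. */
    #include <stdio.h>

    int main(void)
    {
        char line[512];
        FILE *f = fopen("/proc/self/numa_maps", "r");

        if (!f) {
            perror("numa_maps");
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);    /* e.g. "7f0c... interleave:0-3 anon=..." */
        fclose(f);
        return 0;
    }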