Lines Matching refs:pol (mm/mempolicy.c)

163 	struct mempolicy *pol = p->mempolicy;  in get_task_policy()  local
166 if (pol) in get_task_policy()
167 return pol; in get_task_policy()
171 pol = &preferred_node_policy[node]; in get_task_policy()
173 if (pol->mode) in get_task_policy()
174 return pol; in get_task_policy()
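
The lines above are get_task_policy(): return the task's own mempolicy if set, otherwise fall back to the per-node preferred_node_policy table, and finally to the system default. From userspace the same task policy is visible through get_mempolicy(2); a minimal sketch, assuming libnuma's <numaif.h> (link with -lnuma):

    #include <numaif.h>   /* get_mempolicy(), MPOL_* */
    #include <stdio.h>

    int main(void)
    {
        int mode;
        unsigned long nodemask = 0;

        /* Query this task's policy; mode comes back MPOL_DEFAULT when
         * the task never called set_mempolicy(). */
        if (get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8, NULL, 0))
            return 1;
        printf("mode=%d nodemask=0x%lx\n", mode, nodemask);
        return 0;
    }
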
181 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
182 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
185 static inline int mpol_store_user_nodemask(const struct mempolicy *pol) in mpol_store_user_nodemask() argument
187 return pol->flags & MPOL_MODE_FLAGS; in mpol_store_user_nodemask()
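
mpol_store_user_nodemask() says a policy keeps the caller's original nodemask only when a user mode flag was passed. A userspace restatement using the uapi flag values; note that in recent kernels MPOL_MODE_FLAGS also includes MPOL_F_NUMA_BALANCING, which this sketch omits:

    #include <stdio.h>

    /* Values from the kernel uapi (linux/mempolicy.h). */
    #define MPOL_F_STATIC_NODES   (1 << 15)
    #define MPOL_F_RELATIVE_NODES (1 << 14)

    /* The original nodemask is only remembered when one of the user
     * mode flags asks for it. */
    static int store_user_nodemask(unsigned short flags)
    {
        return flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
    }

    int main(void)
    {
        printf("%d %d\n", !!store_user_nodemask(0),
               !!store_user_nodemask(MPOL_F_STATIC_NODES));
        return 0;
    }
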
198 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes) in mpol_new_nodemask() argument
202 pol->nodes = *nodes; in mpol_new_nodemask()
206 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) in mpol_new_preferred() argument
211 nodes_clear(pol->nodes); in mpol_new_preferred()
212 node_set(first_node(*nodes), pol->nodes); in mpol_new_preferred()
224 static int mpol_set_nodemask(struct mempolicy *pol, in mpol_set_nodemask() argument
234 if (!pol || pol->mode == MPOL_LOCAL) in mpol_set_nodemask()
243 if (pol->flags & MPOL_F_RELATIVE_NODES) in mpol_set_nodemask()
248 if (mpol_store_user_nodemask(pol)) in mpol_set_nodemask()
249 pol->w.user_nodemask = *nodes; in mpol_set_nodemask()
251 pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; in mpol_set_nodemask()
253 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); in mpol_set_nodemask()
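
mpol_set_nodemask() intersects (or, for MPOL_F_RELATIVE_NODES, remaps) the user's nodemask against the cpuset's allowed nodes, records either w.user_nodemask or w.cpuset_mems_allowed for later rebinds, then calls the mode's ->create() hook. The user-facing trigger is set_mempolicy(2); a sketch, assuming headers that expose the MPOL_F_* mode flags:

    #include <numaif.h>   /* set_mempolicy(), MPOL_* (link with -lnuma) */
    #include <stdio.h>

    int main(void)
    {
        unsigned long mask = 0x3;   /* nodes 0-1, as the user wrote it */

        /* With MPOL_F_RELATIVE_NODES, mpol_set_nodemask() stores this
         * mask in pol->w.user_nodemask and remaps it onto the cpuset's
         * allowed nodes; without a flag it records cpuset_mems_allowed
         * for later rebinds instead. */
        if (set_mempolicy(MPOL_INTERLEAVE | MPOL_F_RELATIVE_NODES,
                          &mask, sizeof(mask) * 8))
            perror("set_mempolicy");
        return 0;
    }
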
315 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) in mpol_rebind_default() argument
319 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) in mpol_rebind_nodemask() argument
323 if (pol->flags & MPOL_F_STATIC_NODES) in mpol_rebind_nodemask()
324 nodes_and(tmp, pol->w.user_nodemask, *nodes); in mpol_rebind_nodemask()
325 else if (pol->flags & MPOL_F_RELATIVE_NODES) in mpol_rebind_nodemask()
326 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); in mpol_rebind_nodemask()
328 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed, in mpol_rebind_nodemask()
330 pol->w.cpuset_mems_allowed = *nodes; in mpol_rebind_nodemask()
336 pol->nodes = tmp; in mpol_rebind_nodemask()
339 static void mpol_rebind_preferred(struct mempolicy *pol, in mpol_rebind_preferred() argument
342 pol->w.cpuset_mems_allowed = *nodes; in mpol_rebind_preferred()
352 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) in mpol_rebind_policy() argument
354 if (!pol || pol->mode == MPOL_LOCAL) in mpol_rebind_policy()
356 if (!mpol_store_user_nodemask(pol) && in mpol_rebind_policy()
357 nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) in mpol_rebind_policy()
360 mpol_ops[pol->mode].rebind(pol, newmask); in mpol_rebind_policy()
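
The mpol_rebind_*() family reacts to cpuset membership changes: MPOL_F_STATIC_NODES intersects the saved user mask with the new allowed set, MPOL_F_RELATIVE_NODES folds the user mask onto it, and flagless policies are remapped from the old allowed set to the new one. A userspace model of the two flagged cases, with nodemask_t shrunk to a 64-bit word (the kernel's masks are arbitrary-width bitmaps):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t nth_set_bit(uint64_t mask, int n)
    {
        while (n--)
            mask &= mask - 1;    /* drop the lowest set bit */
        return mask & -mask;     /* isolate the new lowest one */
    }

    /* MPOL_F_RELATIVE_NODES: user bit i folds onto the (i % weight)-th
     * set bit of the new allowed mask (cf. mpol_relative_nodemask()). */
    static uint64_t relative_remap(uint64_t user, uint64_t allowed)
    {
        int weight = __builtin_popcountll(allowed);
        uint64_t out = 0;

        if (!weight)
            return 0;
        for (int i = 0; i < 64; i++)
            if (user & (1ULL << i))
                out |= nth_set_bit(allowed, i % weight);
        return out;
    }

    int main(void)
    {
        uint64_t user = 0x5;      /* caller asked for nodes 0 and 2 */
        uint64_t allowed = 0xC0;  /* cpuset now allows nodes 6 and 7 */

        /* MPOL_F_STATIC_NODES: plain intersection, may end up empty. */
        printf("static:   0x%llx\n", (unsigned long long)(user & allowed));
        /* MPOL_F_RELATIVE_NODES: request folded onto the allowed set. */
        printf("relative: 0x%llx\n",
               (unsigned long long)relative_remap(user, allowed));
        return 0;
    }
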
776 struct mempolicy *pol) in vma_replace_policy() argument
789 new = mpol_dup(pol); in vma_replace_policy()
940 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; in do_get_mempolicy() local
969 pol = vma->vm_ops->get_policy(vma, addr); in do_get_mempolicy()
971 pol = vma->vm_policy; in do_get_mempolicy()
975 if (!pol) in do_get_mempolicy()
976 pol = &default_policy; /* indicates default behavior */ in do_get_mempolicy()
985 pol_refcount = pol; in do_get_mempolicy()
987 mpol_get(pol); in do_get_mempolicy()
993 } else if (pol == current->mempolicy && in do_get_mempolicy()
994 pol->mode == MPOL_INTERLEAVE) { in do_get_mempolicy()
995 *policy = next_node_in(current->il_prev, pol->nodes); in do_get_mempolicy()
1001 *policy = pol == &default_policy ? MPOL_DEFAULT : in do_get_mempolicy()
1002 pol->mode; in do_get_mempolicy()
1007 *policy |= (pol->flags & MPOL_MODE_FLAGS); in do_get_mempolicy()
1012 if (mpol_store_user_nodemask(pol)) { in do_get_mempolicy()
1013 *nmask = pol->w.user_nodemask; in do_get_mempolicy()
1016 get_policy_nodemask(pol, nmask); in do_get_mempolicy()
1022 mpol_cond_put(pol); in do_get_mempolicy()
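
do_get_mempolicy() serves get_mempolicy(2): with MPOL_F_ADDR it looks the policy up for the VMA containing addr (lines 969-976 above), and for an interleaved task policy queried with MPOL_F_NODE it reports the next interleave node (lines 993-995). A sketch of the MPOL_F_ADDR path, assuming libnuma headers:

    #include <numaif.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int mode;
        unsigned long mask = 0;
        void *buf = malloc(4096);

        /* MPOL_F_ADDR makes do_get_mempolicy() resolve the policy of
         * the VMA containing buf instead of the task policy. */
        if (get_mempolicy(&mode, &mask, sizeof(mask) * 8, buf, MPOL_F_ADDR))
            perror("get_mempolicy");
        else
            printf("vma mode=%d mask=0x%lx\n", mode, mask);
        free(buf);
        return 0;
    }
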
1768 struct mempolicy *pol = NULL; in __get_vma_policy() local
1772 pol = vma->vm_ops->get_policy(vma, addr); in __get_vma_policy()
1774 pol = vma->vm_policy; in __get_vma_policy()
1782 if (mpol_needs_cond_ref(pol)) in __get_vma_policy()
1783 mpol_get(pol); in __get_vma_policy()
1787 return pol; in __get_vma_policy()
1805 struct mempolicy *pol = __get_vma_policy(vma, addr); in get_vma_policy() local
1807 if (!pol) in get_vma_policy()
1808 pol = get_task_policy(current); in get_vma_policy()
1810 return pol; in get_vma_policy()
1815 struct mempolicy *pol; in vma_policy_mof() local
1820 pol = vma->vm_ops->get_policy(vma, vma->vm_start); in vma_policy_mof()
1821 if (pol && (pol->flags & MPOL_F_MOF)) in vma_policy_mof()
1823 mpol_cond_put(pol); in vma_policy_mof()
1828 pol = vma->vm_policy; in vma_policy_mof()
1829 if (!pol) in vma_policy_mof()
1830 pol = get_task_policy(current); in vma_policy_mof()
1832 return pol->flags & MPOL_F_MOF; in vma_policy_mof()
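
__get_vma_policy()/get_vma_policy() implement the lookup chain used at fault time: a vm_ops->get_policy() hook (shmem provides one), else the VMA's own vm_policy from mbind(2), else the task policy; vma_policy_mof() walks the same chain just to test MPOL_F_MOF. A mock userspace model of the chain (the struct shapes here are hypothetical, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    struct mempolicy { int mode; };

    struct vma {
        struct mempolicy *(*get_policy)(struct vma *);
        struct mempolicy *vm_policy;
    };

    static struct mempolicy task_policy = { 1 };

    static struct mempolicy *get_vma_policy(struct vma *vma)
    {
        if (vma->get_policy)          /* e.g. shmem provides this   */
            return vma->get_policy(vma);
        if (vma->vm_policy)           /* set by mbind() on the VMA  */
            return vma->vm_policy;
        return &task_policy;          /* set_mempolicy() fallback   */
    }

    int main(void)
    {
        struct vma v = { NULL, NULL };
        printf("mode=%d\n", get_vma_policy(&v)->mode); /* task policy */
        return 0;
    }
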
1967 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) in offset_il_node() argument
1969 nodemask_t nodemask = pol->nodes; in offset_il_node()
1993 static inline unsigned interleave_nid(struct mempolicy *pol, in interleave_nid() argument
2009 return offset_il_node(pol, off); in interleave_nid()
2011 return interleave_nodes(pol); in interleave_nid()
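
offset_il_node() turns a page offset into an interleave node: reduce the offset modulo the weight of pol->nodes, then walk to that set bit; interleave_nid() feeds it a shift-aligned offset so huge pages interleave on their own boundary. A self-contained sketch over a 64-bit mask:

    #include <stdint.h>
    #include <stdio.h>

    /* Pick the (n % weight)-th set bit of a 64-bit nodemask. */
    static int offset_il_node(uint64_t nodes, unsigned long n)
    {
        int weight = __builtin_popcountll(nodes);
        int target, nid = -1;

        if (!weight)          /* kernel falls back to numa_node_id() */
            return 0;
        target = (int)(n % weight);
        do {
            nid = __builtin_ctzll(nodes);  /* lowest remaining node */
            nodes &= nodes - 1;            /* consume it */
        } while (target--);
        return nid;
    }

    int main(void)
    {
        /* mask 0xD = nodes {0,2,3}: offsets cycle 0,2,3,0,2,3,... */
        for (unsigned long n = 0; n < 6; n++)
            printf("offset %lu -> node %d\n", n, offset_il_node(0xD, n));
        return 0;
    }
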
2145 int nid, struct mempolicy *pol) in alloc_pages_preferred_many() argument
2158 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); in alloc_pages_preferred_many()
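
alloc_pages_preferred_many() makes two passes: first a lightweight attempt restricted to pol->nodes (the gfp is relaxed so the first pass will not direct-reclaim), then an unrestricted fallback. Userspace opts in with MPOL_PREFERRED_MANY; a sketch assuming headers new enough to define it (Linux 5.15+):

    #include <numaif.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        unsigned long mask = 0x3;  /* prefer nodes 0 and 1 */

        /* Later allocations for this task go through the try-preferred-
         * then-fall-back path of alloc_pages_preferred_many(). */
        if (set_mempolicy(MPOL_PREFERRED_MANY, &mask, sizeof(mask) * 8))
            perror("set_mempolicy");
        char *p = malloc(1 << 20);
        memset(p, 0, 1 << 20);     /* fault pages in under the policy */
        free(p);
        return 0;
    }
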
2183 struct mempolicy *pol; in vma_alloc_folio() local
2189 pol = get_vma_policy(vma, addr); in vma_alloc_folio()
2191 if (pol->mode == MPOL_INTERLEAVE) { in vma_alloc_folio()
2195 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); in vma_alloc_folio()
2196 mpol_cond_put(pol); in vma_alloc_folio()
2205 if (pol->mode == MPOL_PREFERRED_MANY) { in vma_alloc_folio()
2208 node = policy_node(gfp, pol, node); in vma_alloc_folio()
2210 page = alloc_pages_preferred_many(gfp, order, node, pol); in vma_alloc_folio()
2211 mpol_cond_put(pol); in vma_alloc_folio()
2231 if (pol->mode == MPOL_PREFERRED) in vma_alloc_folio()
2232 hpage_node = first_node(pol->nodes); in vma_alloc_folio()
2234 nmask = policy_nodemask(gfp, pol); in vma_alloc_folio()
2236 mpol_cond_put(pol); in vma_alloc_folio()
2258 nmask = policy_nodemask(gfp, pol); in vma_alloc_folio()
2259 preferred_nid = policy_node(gfp, pol, node); in vma_alloc_folio()
2261 mpol_cond_put(pol); in vma_alloc_folio()
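
vma_alloc_folio() is the fault-path allocator: interleave picks a nid and returns early, MPOL_PREFERRED_MANY takes the two-pass helper above, and THP allocations try a sole preferred node (hpage_node) before the generic __alloc_pages() with the policy's nodemask. A policy reaches this path once mbind(2) attaches it to a VMA; a minimal sketch:

    #include <numaif.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 1 << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        unsigned long mask = 1;   /* node 0 */

        if (p == MAP_FAILED)
            return 1;
        /* After mbind(), faults in [p, p+len) reach vma_alloc_folio()
         * with vma->vm_policy set to MPOL_BIND on node 0. */
        if (mbind(p, len, MPOL_BIND, &mask, sizeof(mask) * 8, 0))
            perror("mbind");
        ((char *)p)[0] = 1;       /* fault a page under the policy */
        return 0;
    }
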
2283 struct mempolicy *pol = &default_policy; in alloc_pages() local
2287 pol = get_task_policy(current); in alloc_pages()
2293 if (pol->mode == MPOL_INTERLEAVE) in alloc_pages()
2294 page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); in alloc_pages()
2295 else if (pol->mode == MPOL_PREFERRED_MANY) in alloc_pages()
2297 policy_node(gfp, pol, numa_node_id()), pol); in alloc_pages()
2300 policy_node(gfp, pol, numa_node_id()), in alloc_pages()
2301 policy_nodemask(gfp, pol)); in alloc_pages()
2319 struct mempolicy *pol, unsigned long nr_pages, in alloc_pages_bulk_array_interleave() argument
2329 nodes = nodes_weight(pol->nodes); in alloc_pages_bulk_array_interleave()
2336 interleave_nodes(pol), NULL, in alloc_pages_bulk_array_interleave()
2342 interleave_nodes(pol), NULL, in alloc_pages_bulk_array_interleave()
2354 struct mempolicy *pol, unsigned long nr_pages, in alloc_pages_bulk_array_preferred_many() argument
2363 nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes, in alloc_pages_bulk_array_preferred_many()
2382 struct mempolicy *pol = &default_policy; in alloc_pages_bulk_array_mempolicy() local
2385 pol = get_task_policy(current); in alloc_pages_bulk_array_mempolicy()
2387 if (pol->mode == MPOL_INTERLEAVE) in alloc_pages_bulk_array_mempolicy()
2388 return alloc_pages_bulk_array_interleave(gfp, pol, in alloc_pages_bulk_array_mempolicy()
2391 if (pol->mode == MPOL_PREFERRED_MANY) in alloc_pages_bulk_array_mempolicy()
2393 numa_node_id(), pol, nr_pages, page_array); in alloc_pages_bulk_array_mempolicy()
2395 return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()), in alloc_pages_bulk_array_mempolicy()
2396 policy_nodemask(gfp, pol), nr_pages, NULL, in alloc_pages_bulk_array_mempolicy()
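
The bulk-array helpers keep the same mode dispatch: interleave splits nr_pages evenly across the weight of pol->nodes, handing one extra page to each of the first nr_pages % nodes nodes, and preferred-many repeats the two-pass trick per batch. The split arithmetic, as a standalone sketch:

    #include <stdio.h>

    /* The division alloc_pages_bulk_array_interleave() performs. */
    int main(void)
    {
        unsigned long nr_pages = 10, nodes = 3;
        unsigned long per_node = nr_pages / nodes;
        unsigned long extra = nr_pages % nodes;

        for (unsigned long i = 0; i < nodes; i++)
            printf("node %lu: %lu pages\n", i, per_node + (i < extra));
        return 0;
    }
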
2402 struct mempolicy *pol = mpol_dup(vma_policy(src)); in vma_dup_policy() local
2404 if (IS_ERR(pol)) in vma_dup_policy()
2405 return PTR_ERR(pol); in vma_dup_policy()
2406 dst->vm_policy = pol; in vma_dup_policy()
2547 struct mempolicy *pol = NULL; in mpol_shared_policy_lookup() local
2556 pol = sn->policy; in mpol_shared_policy_lookup()
2559 return pol; in mpol_shared_policy_lookup()
2584 struct mempolicy *pol; in mpol_misplaced() local
2593 pol = get_vma_policy(vma, addr); in mpol_misplaced()
2594 if (!(pol->flags & MPOL_F_MOF)) in mpol_misplaced()
2597 switch (pol->mode) { in mpol_misplaced()
2601 polnid = offset_il_node(pol, pgoff); in mpol_misplaced()
2605 if (node_isset(curnid, pol->nodes)) in mpol_misplaced()
2607 polnid = first_node(pol->nodes); in mpol_misplaced()
2616 if (pol->flags & MPOL_F_MORON) { in mpol_misplaced()
2617 if (node_isset(thisnid, pol->nodes)) in mpol_misplaced()
2629 if (node_isset(curnid, pol->nodes)) in mpol_misplaced()
2634 &pol->nodes); in mpol_misplaced()
2643 if (pol->flags & MPOL_F_MORON) { in mpol_misplaced()
2653 mpol_cond_put(pol); in mpol_misplaced()
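
mpol_misplaced() is NUMA balancing's hinting-fault check: it computes the node the policy says the page should be on (polnid) per mode, and with MPOL_F_MORON steers toward the faulting CPU's node instead; returning NUMA_NO_NODE keeps the page where it is. A distilled, hypothetical restatement with plain ints (the real function also consults should_numa_migrate_memory()):

    #include <stdio.h>

    /* Return the node to migrate to, or -1 (the kernel's NUMA_NO_NODE)
     * to leave the page alone. */
    static int misplaced(int curnid, int polnid, int thisnid, int moron)
    {
        if (moron)             /* MPOL_F_MORON: pull the page toward   */
            polnid = thisnid;  /* the node of the CPU taking the fault */
        return curnid != polnid ? polnid : -1;
    }

    int main(void)
    {
        /* page on node 0, policy says node 1 -> migrate to node 1 */
        printf("%d\n", misplaced(0, 1, 1, 0));
        /* already well placed -> -1, no migration */
        printf("%d\n", misplaced(1, 1, 0, 0));
        return 0;
    }
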
2666 struct mempolicy *pol; in mpol_put_task_policy() local
2669 pol = task->mempolicy; in mpol_put_task_policy()
2672 mpol_put(pol); in mpol_put_task_policy()
2683 unsigned long end, struct mempolicy *pol) in sp_node_init() argument
2687 node->policy = pol; in sp_node_init()
2691 struct mempolicy *pol) in sp_alloc() argument
2700 newpol = mpol_dup(pol); in sp_alloc()
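
sp_node_init()/sp_alloc() build the per-range nodes of the shared policy tree that mpol_shared_policy_lookup() (lines 2547-2559 above) searches; shmem uses it so every mapper of an object sees the same policy. mbind(2) on shared anonymous memory, which is shmem-backed, exercises this path; a sketch:

    #include <numaif.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 1 << 20;
        /* MAP_SHARED|MAP_ANONYMOUS memory is backed by shmem, so this
         * mbind() is stored via sp_alloc() in the shared policy tree
         * rather than in the VMA. */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        unsigned long mask = 1;

        if (p == MAP_FAILED)
            return 1;
        if (mbind(p, len, MPOL_INTERLEAVE, &mask, sizeof(mask) * 8, 0))
            perror("mbind");
        return 0;
    }
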
3136 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) in mpol_to_str() argument
3143 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { in mpol_to_str()
3144 mode = pol->mode; in mpol_to_str()
3145 flags = pol->flags; in mpol_to_str()
3156 nodes = pol->nodes; in mpol_to_str()
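
mpol_to_str() formats a policy as text ("default", "interleave:0-3", ...); MPOL_F_MORON policies are deliberately reported as default since they are kernel-internal (line 3143 above). The easiest place to see its output is /proc/<pid>/numa_maps, where it is the first field of each line:

    #include <stdio.h>

    int main(void)
    {
        char line[512];
        FILE *f = fopen("/proc/self/numa_maps", "r");

        if (!f)
            return 1;
        /* Print the first few lines; the leading token of each one is
         * the mpol_to_str() rendering of that mapping's policy. */
        for (int i = 0; i < 5 && fgets(line, sizeof(line), f); i++)
            fputs(line, stdout);
        fclose(f);
        return 0;
    }
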