Lines matching "non", "-", "live" in mm/memremap.c
1 // SPDX-License-Identifier: GPL-2.0
46 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in devmap_managed_enable_put()
47 pgmap->type == MEMORY_DEVICE_FS_DAX) in devmap_managed_enable_put()
53 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in devmap_managed_enable_get()
54 pgmap->type == MEMORY_DEVICE_FS_DAX) in devmap_managed_enable_get()
68 xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end), in pgmap_array_delete()
75 struct range *range = &pgmap->ranges[range_id]; in pfn_first()
76 unsigned long pfn = PHYS_PFN(range->start); in pfn_first()
87 for (i = 0; i < pgmap->nr_range; i++) { in pgmap_pfn_valid()
88 struct range *range = &pgmap->ranges[i]; in pgmap_pfn_valid()
90 if (pfn >= PHYS_PFN(range->start) && in pgmap_pfn_valid()
91 pfn <= PHYS_PFN(range->end)) in pgmap_pfn_valid()
100 const struct range *range = &pgmap->ranges[range_id]; in pfn_end()
102 return (range->start + range_len(range)) >> PAGE_SHIFT; in pfn_end()
117 if (pgmap->ops && pgmap->ops->kill) in dev_pagemap_kill()
118 pgmap->ops->kill(pgmap); in dev_pagemap_kill()
120 percpu_ref_kill(pgmap->ref); in dev_pagemap_kill()
125 if (pgmap->ops && pgmap->ops->cleanup) { in dev_pagemap_cleanup()
126 pgmap->ops->cleanup(pgmap); in dev_pagemap_cleanup()
128 wait_for_completion(&pgmap->done); in dev_pagemap_cleanup()
129 percpu_ref_exit(pgmap->ref); in dev_pagemap_cleanup()
133 * caller may re-enable the same pgmap. in dev_pagemap_cleanup()
135 if (pgmap->ref == &pgmap->internal_ref) in dev_pagemap_cleanup()
136 pgmap->ref = NULL; in dev_pagemap_cleanup()
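
The two helpers above implement the pgmap shutdown protocol: kill() stops new references from being taken (falling back to percpu_ref_kill() on the internal ref), and cleanup() waits for the last reference to drop before tearing the ref down. As memremap_pages() further down enforces, a driver that supplies its own pgmap->ref must also supply both callbacks. A minimal sketch of such a pair, assuming a driver-private percpu_ref and completion; all "my_*" names are hypothetical, not from the tree:

#include <linux/completion.h>
#include <linux/gfp.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>

static struct percpu_ref my_ref;
static DECLARE_COMPLETION(my_ref_done);

/* release callback: runs once the killed ref finally drops to zero */
static void my_ref_release(struct percpu_ref *ref)
{
	complete(&my_ref_done);
}

/* ops->kill: stop new get_dev_pagemap() users from taking references */
static void my_pgmap_kill(struct dev_pagemap *pgmap)
{
	percpu_ref_kill(pgmap->ref);
}

/* ops->cleanup: wait for the last reference, then tear the ref down */
static void my_pgmap_cleanup(struct dev_pagemap *pgmap)
{
	wait_for_completion(&my_ref_done);
	percpu_ref_exit(pgmap->ref);
}

static const struct dev_pagemap_ops my_pgmap_ops = {
	.kill		= my_pgmap_kill,
	.cleanup	= my_pgmap_cleanup,
};

/* hypothetical setup step, run before memremap_pages() */
static int my_pgmap_setup_ref(struct dev_pagemap *pgmap)
{
	int error;

	error = percpu_ref_init(&my_ref, my_ref_release, 0, GFP_KERNEL);
	if (error)
		return error;
	pgmap->ref = &my_ref;
	return 0;
}

Leaving pgmap->ref NULL instead selects the internal_ref path shown above, in which case neither ops->kill nor ops->cleanup may be set.
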
141 struct range *range = &pgmap->ranges[range_id]; in pageunmap_range()
149 remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start), in pageunmap_range()
151 if (pgmap->type == MEMORY_DEVICE_PRIVATE) { in pageunmap_range()
152 __remove_pages(PHYS_PFN(range->start), in pageunmap_range()
155 arch_remove_memory(range->start, range_len(range), in pageunmap_range()
157 kasan_remove_zero_shadow(__va(range->start), range_len(range)); in pageunmap_range()
161 untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range)); in pageunmap_range()
171 for (i = 0; i < pgmap->nr_range; i++) in memunmap_pages()
176 for (i = 0; i < pgmap->nr_range; i++) in memunmap_pages()
179 WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n"); in memunmap_pages()
194 complete(&pgmap->done); in dev_pagemap_percpu_release()
200 const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE; in pagemap_range()
201 struct range *range = &pgmap->ranges[range_id]; in pagemap_range()
207 return -EINVAL; in pagemap_range()
209 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL); in pagemap_range()
213 return -ENOMEM; in pagemap_range()
216 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL); in pagemap_range()
220 return -ENOMEM; in pagemap_range()
223 is_ram = region_intersects(range->start, range_len(range), in pagemap_range()
227 WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n", in pagemap_range()
229 range->start, range->end); in pagemap_range()
230 return -ENXIO; in pagemap_range()
233 error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start), in pagemap_range()
234 PHYS_PFN(range->end), pgmap, GFP_KERNEL)); in pagemap_range()
241 error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0, in pagemap_range()
246 if (!mhp_range_allowed(range->start, range_len(range), !is_private)) { in pagemap_range()
247 error = -EINVAL; in pagemap_range()
255 * allocate and initialize struct page for the device memory. Moreover, in pagemap_range()
256 * the device memory is inaccessible, thus we do not want to in pagemap_range()
265 error = add_pages(nid, PHYS_PFN(range->start), in pagemap_range()
268 error = kasan_add_zero_shadow(__va(range->start), range_len(range)); in pagemap_range()
274 error = arch_add_memory(nid, range->start, range_len(range), in pagemap_range()
281 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; in pagemap_range()
282 move_pfn_range_to_zone(zone, PHYS_PFN(range->start), in pagemap_range()
283 PHYS_PFN(range_len(range)), params->altmap, in pagemap_range()
295 memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], in pagemap_range()
296 PHYS_PFN(range->start), in pagemap_range()
298 percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id) in pagemap_range()
299 - pfn_first(pgmap, range_id)); in pagemap_range()
303 kasan_remove_zero_shadow(__va(range->start), range_len(range)); in pagemap_range()
305 untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range)); in pagemap_range()
323 const int nr_range = pgmap->nr_range; in memremap_pages()
327 return ERR_PTR(-EINVAL); in memremap_pages()
329 switch (pgmap->type) { in memremap_pages()
333 return ERR_PTR(-EINVAL); in memremap_pages()
335 if (!pgmap->ops || !pgmap->ops->migrate_to_ram) { in memremap_pages()
337 return ERR_PTR(-EINVAL); in memremap_pages()
339 if (!pgmap->ops->page_free) { in memremap_pages()
341 return ERR_PTR(-EINVAL); in memremap_pages()
343 if (!pgmap->owner) { in memremap_pages()
345 return ERR_PTR(-EINVAL); in memremap_pages()
352 return ERR_PTR(-EINVAL); in memremap_pages()
361 WARN(1, "Invalid pgmap type %d\n", pgmap->type); in memremap_pages()
365 if (!pgmap->ref) { in memremap_pages()
366 if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup)) in memremap_pages()
367 return ERR_PTR(-EINVAL); in memremap_pages()
369 init_completion(&pgmap->done); in memremap_pages()
370 error = percpu_ref_init(&pgmap->internal_ref, in memremap_pages()
374 pgmap->ref = &pgmap->internal_ref; in memremap_pages()
376 if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) { in memremap_pages()
378 return ERR_PTR(-EINVAL); in memremap_pages()
389 pgmap->nr_range = 0; in memremap_pages()
395 pgmap->nr_range++; in memremap_pages()
400 pgmap->nr_range = nr_range; in memremap_pages()
404 return __va(pgmap->ranges[0].start); in memremap_pages()
409 * devm_memremap_pages - remap and provide memmap backing for the given resource
418 * PGMAP_ALTMAP_VALID must be set in pgmap->flags.
420 * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
421 * 'live' on entry and will be killed and reaped at
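
The kernel-doc above describes devm_memremap_pages(), the device-managed front end to memremap_pages(). A minimal sketch of a MEMORY_DEVICE_PRIVATE caller, assuming the pgmap is zero-initialized by the caller and that the physical range is carved out with request_free_mem_region(); the "my_*" names and the SZ_64M size are illustrative, not from the tree:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/sizes.h>

/* called when a CPU access faults on a device-private PTE */
static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	/* migrate the data back to system RAM here */
	return VM_FAULT_SIGBUS;		/* placeholder */
}

/* called once the last reference to a device-private page is dropped */
static void my_page_free(struct page *page)
{
	/* return the backing device memory to the driver's allocator */
}

static const struct dev_pagemap_ops my_devmem_ops = {
	.migrate_to_ram	= my_migrate_to_ram,
	.page_free	= my_page_free,
};

static int my_devmem_init(struct device *dev, struct dev_pagemap *pgmap)
{
	struct resource *res;
	void *addr;

	/* carve an unused physical range out of iomem for the device pages */
	res = request_free_mem_region(&iomem_resource, SZ_64M, "my-devmem");
	if (IS_ERR(res))
		return PTR_ERR(res);

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->nr_range = 1;
	pgmap->ops = &my_devmem_ops;
	pgmap->owner = dev;	/* opaque cookie matched by migrate_vma/HMM callers */

	/* pgmap->ref left NULL: internal_ref and built-in kill/cleanup are used */
	addr = devm_memremap_pages(dev, pgmap);
	return PTR_ERR_OR_ZERO(addr);
}

Real users typically implement migrate_to_ram() with migrate_vma_setup()/migrate_vma_pages()/migrate_vma_finalize(); the stub above only marks where that logic goes.
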
455 return altmap->reserve + altmap->free; in vmem_altmap_offset()
461 altmap->alloc -= nr_pfns; in vmem_altmap_free()
465 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
469 * If @pgmap is non-NULL and covers @pfn, it will be returned as-is. If @pgmap
470 * is non-NULL but does not cover @pfn, the reference to it will be released.
478 * In the cached case we're already holding a live reference. in get_dev_pagemap()
481 if (phys >= pgmap->range.start && phys <= pgmap->range.end) in get_dev_pagemap()
489 if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) in get_dev_pagemap()
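
As the kernel-doc above notes, the @pgmap argument lets a caller that walks many pfns reuse a cached reference instead of redoing the lookup for every pfn. A minimal sketch of that pattern, paired with put_dev_pagemap() for the final reference; my_walk() is a hypothetical caller:

#include <linux/memremap.h>

static void my_walk(unsigned long start_pfn, unsigned long nr_pages)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		/* reuses the cached reference while @pfn stays inside its range */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			continue;	/* pfn is not ZONE_DEVICE-backed */
		/* ... operate on pfn_to_page(pfn) ... */
	}
	if (pgmap)
		put_dev_pagemap(pgmap);
}
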
502 wake_up_var(&page->_refcount); in free_devmap_managed_page()
511 * When a device_private page is freed, the page->mapping field in free_devmap_managed_page()
513 * lower bits of page->mapping may still identify the page as an in free_devmap_managed_page()
522 * ...checks page->mapping, via PageAnon(page) call, in free_devmap_managed_page()
529 * to clear page->mapping. in free_devmap_managed_page()
531 page->mapping = NULL; in free_devmap_managed_page()
532 page->pgmap->ops->page_free(page); in free_devmap_managed_page()
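
The comment trail above explains why the core clears page->mapping before handing the page to the driver: the stale anonymous-mapping bits must not leak into the page's next use. On the driver side, ->page_free() typically just recycles the device-private page. A minimal sketch in the style of existing users, chaining freed pages through page->zone_device_data; the "my_*" names are hypothetical:

#include <linux/mm.h>
#include <linux/spinlock.h>

static struct page *my_free_pages;	/* singly linked driver free list */
static DEFINE_SPINLOCK(my_free_lock);

/* ->page_free(): page->mapping has already been cleared by the core */
static void my_page_free(struct page *page)
{
	spin_lock(&my_free_lock);
	page->zone_device_data = my_free_pages;	/* chain onto the free list */
	my_free_pages = page;
	spin_unlock(&my_free_lock);
}
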