Lines from the Linux kernel's memremap.c (ZONE_DEVICE / devm_memremap_pages() support) matching the search query +full:non +full:- +full:live

1 /* SPDX-License-Identifier: GPL-2.0 */
46 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in devmap_managed_enable_put()
47 pgmap->type == MEMORY_DEVICE_FS_DAX) in devmap_managed_enable_put()
53 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in devmap_managed_enable_get()
54 pgmap->type == MEMORY_DEVICE_FS_DAX) in devmap_managed_enable_get()
68 xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end), in pgmap_array_delete()
75 struct range *range = &pgmap->ranges[range_id]; in pfn_first()
76 unsigned long pfn = PHYS_PFN(range->start); in pfn_first()
85 const struct range *range = &pgmap->ranges[range_id]; in pfn_end()
87 return (range->start + range_len(range)) >> PAGE_SHIFT; in pfn_end()
102 if (pgmap->ops && pgmap->ops->kill) in dev_pagemap_kill()
103 pgmap->ops->kill(pgmap); in dev_pagemap_kill()
105 percpu_ref_kill(pgmap->ref); in dev_pagemap_kill()
110 if (pgmap->ops && pgmap->ops->cleanup) { in dev_pagemap_cleanup()
111 pgmap->ops->cleanup(pgmap); in dev_pagemap_cleanup()
113 wait_for_completion(&pgmap->done); in dev_pagemap_cleanup()
114 percpu_ref_exit(pgmap->ref); in dev_pagemap_cleanup()
118 * caller may re-enable the same pgmap. in dev_pagemap_cleanup()
120 if (pgmap->ref == &pgmap->internal_ref) in dev_pagemap_cleanup()
121 pgmap->ref = NULL; in dev_pagemap_cleanup()
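The kill/cleanup pair above uses either the caller's reference count or the internal one: a driver that supplies its own pgmap->ref must also supply matching kill and cleanup callbacks (memremap_pages() below rejects any other combination), and those callbacks replace the percpu_ref_kill()/wait_for_completion() path shown here. A minimal sketch of such a pair, assuming a hypothetical driver that embeds its own percpu_ref and completion in a struct mydev (all mydev_* names are invented for illustration):

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>

struct mydev {
	struct dev_pagemap pgmap;
	struct percpu_ref ref;		/* assigned to pgmap.ref by the driver */
	struct completion ref_done;
};

static void mydev_ref_release(struct percpu_ref *ref)
{
	struct mydev *mdev = container_of(ref, struct mydev, ref);

	complete(&mdev->ref_done);
}

static void mydev_kill(struct dev_pagemap *pgmap)
{
	struct mydev *mdev = container_of(pgmap, struct mydev, pgmap);

	/* Block new references; existing ones drain asynchronously. */
	percpu_ref_kill(&mdev->ref);
}

static void mydev_cleanup(struct dev_pagemap *pgmap)
{
	struct mydev *mdev = container_of(pgmap, struct mydev, pgmap);

	/* Wait for the last page reference to drop, then tear the ref down. */
	wait_for_completion(&mdev->ref_done);
	percpu_ref_exit(&mdev->ref);
}

Before mapping any range such a driver would init_completion(&mdev->ref_done), percpu_ref_init(&mdev->ref, mydev_ref_release, 0, GFP_KERNEL), and point pgmap.ref at &mdev->ref, mirroring the internal_ref handling above.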
126 struct range *range = &pgmap->ranges[range_id]; in pageunmap_range()
137 remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start), in pageunmap_range()
139 if (pgmap->type == MEMORY_DEVICE_PRIVATE) { in pageunmap_range()
140 __remove_pages(PHYS_PFN(range->start), in pageunmap_range()
143 arch_remove_memory(nid, range->start, range_len(range), in pageunmap_range()
145 kasan_remove_zero_shadow(__va(range->start), range_len(range)); in pageunmap_range()
149 untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range)); in pageunmap_range()
159 for (i = 0; i < pgmap->nr_range; i++) in memunmap_pages()
164 for (i = 0; i < pgmap->nr_range; i++) in memunmap_pages()
167 WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n"); in memunmap_pages()
182 complete(&pgmap->done); in dev_pagemap_percpu_release()
188 struct range *range = &pgmap->ranges[range_id]; in pagemap_range()
194 return -EINVAL; in pagemap_range()
196 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL); in pagemap_range()
200 return -ENOMEM; in pagemap_range()
203 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL); in pagemap_range()
207 return -ENOMEM; in pagemap_range()
210 is_ram = region_intersects(range->start, range_len(range), in pagemap_range()
214 WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n", in pagemap_range()
216 range->start, range->end); in pagemap_range()
217 return -ENXIO; in pagemap_range()
220 error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start), in pagemap_range()
221 PHYS_PFN(range->end), pgmap, GFP_KERNEL)); in pagemap_range()
228 error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0, in pagemap_range()
237 * allocate and initialize struct page for the device memory. More- in pagemap_range()
238 * over the device memory is un-accessible thus we do not want to in pagemap_range()
246 if (pgmap->type == MEMORY_DEVICE_PRIVATE) { in pagemap_range()
247 error = add_pages(nid, PHYS_PFN(range->start), in pagemap_range()
250 error = kasan_add_zero_shadow(__va(range->start), range_len(range)); in pagemap_range()
256 error = arch_add_memory(nid, range->start, range_len(range), in pagemap_range()
263 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; in pagemap_range()
264 move_pfn_range_to_zone(zone, PHYS_PFN(range->start), in pagemap_range()
265 PHYS_PFN(range_len(range)), params->altmap, in pagemap_range()
277 memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], in pagemap_range()
278 PHYS_PFN(range->start), in pagemap_range()
280 percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id) in pagemap_range()
281 - pfn_first(pgmap, range_id)); in pagemap_range()
285 kasan_remove_zero_shadow(__va(range->start), range_len(range)); in pagemap_range()
287 untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range)); in pagemap_range()
305 const int nr_range = pgmap->nr_range; in memremap_pages()
309 return ERR_PTR(-EINVAL); in memremap_pages()
311 switch (pgmap->type) { in memremap_pages()
315 return ERR_PTR(-EINVAL); in memremap_pages()
317 if (!pgmap->ops || !pgmap->ops->migrate_to_ram) { in memremap_pages()
319 return ERR_PTR(-EINVAL); in memremap_pages()
321 if (!pgmap->ops->page_free) { in memremap_pages()
323 return ERR_PTR(-EINVAL); in memremap_pages()
325 if (!pgmap->owner) { in memremap_pages()
327 return ERR_PTR(-EINVAL); in memremap_pages()
334 return ERR_PTR(-EINVAL); in memremap_pages()
343 WARN(1, "Invalid pgmap type %d\n", pgmap->type); in memremap_pages()
347 if (!pgmap->ref) { in memremap_pages()
348 if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup)) in memremap_pages()
349 return ERR_PTR(-EINVAL); in memremap_pages()
351 init_completion(&pgmap->done); in memremap_pages()
352 error = percpu_ref_init(&pgmap->internal_ref, in memremap_pages()
356 pgmap->ref = &pgmap->internal_ref; in memremap_pages()
358 if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) { in memremap_pages()
360 return ERR_PTR(-EINVAL); in memremap_pages()
371 pgmap->nr_range = 0; in memremap_pages()
377 pgmap->nr_range++; in memremap_pages()
382 pgmap->nr_range = nr_range; in memremap_pages()
386 return __va(pgmap->ranges[0].start); in memremap_pages()
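The MEMORY_DEVICE_PRIVATE branch of the validation above requires a non-NULL owner plus dev_pagemap_ops providing both migrate_to_ram() and page_free() before any range is mapped. A minimal, hypothetical ops table satisfying those checks might look like the following; the migrate_to_ram body is only a stub (a real driver migrates the data back to system memory, typically with the migrate_vma_* helpers), and the page_free body is sketched at the end of this listing:

#include <linux/memremap.h>
#include <linux/mm.h>

static void mydev_page_free(struct page *page);	/* sketched below */

static vm_fault_t mydev_migrate_to_ram(struct vm_fault *vmf)
{
	/*
	 * Stub only: a real handler copies the faulting device-private page
	 * back to system RAM and returns 0, or VM_FAULT_SIGBUS on failure.
	 */
	return VM_FAULT_SIGBUS;
}

static const struct dev_pagemap_ops mydev_pagemap_ops = {
	.migrate_to_ram	= mydev_migrate_to_ram,
	.page_free	= mydev_page_free,
};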
391 * devm_memremap_pages - remap and provide memmap backing for the given resource
400 * PGMAP_ALTMAP_VALID must be set in pgmap->flags.
402 * 3/ The ref field may optionally be provided, in which pgmap->ref must be
403 * 'live' on entry and will be killed and reaped at
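Putting the pieces together, a hypothetical device-private driver could reserve an unused physical range and hand it to devm_memremap_pages() roughly as follows (a sketch only, reusing the invented struct mydev and mydev_pagemap_ops from the earlier sketches; error handling is abbreviated):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>

static int mydev_add_device_memory(struct device *dev, struct mydev *mdev,
				   unsigned long size)
{
	struct resource *res;
	void *addr;

	/* Find an unused physical address range to host the device pages. */
	res = request_free_mem_region(&iomem_resource, size, "mydev");
	if (IS_ERR(res))
		return PTR_ERR(res);

	mdev->pgmap.type = MEMORY_DEVICE_PRIVATE;
	mdev->pgmap.range.start = res->start;
	mdev->pgmap.range.end = res->end;
	mdev->pgmap.nr_range = 1;
	mdev->pgmap.ops = &mydev_pagemap_ops;
	mdev->pgmap.owner = mdev;	/* any cookie unique to this driver */

	/*
	 * Leaving pgmap.ref NULL lets memremap_pages() use its internal
	 * percpu_ref; a driver that sets pgmap.ref must also provide the
	 * kill/cleanup callbacks sketched earlier.
	 */
	addr = devm_memremap_pages(dev, &mdev->pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	return 0;
}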
437 return altmap->reserve + altmap->free; in vmem_altmap_offset()
443 altmap->alloc -= nr_pfns; in vmem_altmap_free()
447 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
451 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
452 * is non-NULL but does not cover @pfn the reference to it will be released.
460 * In the cached case we're already holding a live reference. in get_dev_pagemap()
463 if (phys >= pgmap->range.start && phys <= pgmap->range.end) in get_dev_pagemap()
471 if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) in get_dev_pagemap()
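The caching described in the comment above lets a pfn walker keep reusing the reference it already holds while successive pfns stay within the same dev_pagemap, paying the pgmap_array lookup only when it crosses into a new one. A minimal sketch of that pattern (the walker itself is invented for illustration):

#include <linux/memremap.h>

static void walk_device_pfns(unsigned long start_pfn, unsigned long nr_pages)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		/*
		 * Returns the cached @pgmap as-is when it still covers @pfn;
		 * otherwise drops that reference and takes a live one on the
		 * pagemap that does cover @pfn (or returns NULL if none).
		 */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			break;
		/* ... operate on pfn_to_page(pfn) ... */
	}
	put_dev_pagemap(pgmap);	/* NULL-safe */
}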
484 wake_up_var(&page->_refcount); in free_devmap_managed_page()
493 * When a device_private page is freed, the page->mapping field in free_devmap_managed_page()
495 * lower bits of page->mapping may still identify the page as an in free_devmap_managed_page()
504 * ...checks page->mapping, via PageAnon(page) call, in free_devmap_managed_page()
511 * to clear page->mapping. in free_devmap_managed_page()
513 page->mapping = NULL; in free_devmap_managed_page()
514 page->pgmap->ops->page_free(page); in free_devmap_managed_page()
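With page->mapping already cleared by the core, the driver's page_free() callback only has to recycle the device page itself. A minimal sketch, modeled on the common pattern of chaining free device-private pages through page->zone_device_data, and assuming the hypothetical struct mydev from the earlier sketches also carries a free_lock spinlock and a free_pages list head (both invented here):

#include <linux/memremap.h>
#include <linux/spinlock.h>

static void mydev_page_free(struct page *page)
{
	struct mydev *mdev = container_of(page->pgmap, struct mydev, pgmap);

	/* Push the freed device page onto the driver's private free list. */
	spin_lock(&mdev->free_lock);
	page->zone_device_data = mdev->free_pages;
	mdev->free_pages = page;
	spin_unlock(&mdev->free_lock);
}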