Lines matching refs:pgmap in mm/memremap.c (Linux kernel), grouped by function. The left-hand numbers are source line numbers within memremap.c; "[pgmap: argument]" or "[pgmap: local]" records how pgmap is bound in that function.

devmap_managed_enable_put()  [pgmap: argument]
   44  static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
   46          if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
   47              pgmap->type == MEMORY_DEVICE_FS_DAX)

devmap_managed_enable_get()  [pgmap: argument]
   51  static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
   53          if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
   54              pgmap->type == MEMORY_DEVICE_FS_DAX)

devmap_managed_enable_get() / devmap_managed_enable_put(), second definitions
(no-op stubs, presumably the !CONFIG_DEV_PAGEMAP_OPS branch)  [pgmap: argument]
   58  static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
   61  static void devmap_managed_enable_put(struct dev_pagemap *pgmap)

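For context: in kernels of this vintage these helpers gate the devmap_managed_key static branch, which makes put_page() route managed device pages through free_devmap_managed_page() (line 514 below). A minimal sketch of the CONFIG_DEV_PAGEMAP_OPS bodies, reconstructed from the matched lines above; treat it as an approximation, not a quote of the source:

    #include <linux/jump_label.h>
    #include <linux/memremap.h>

    DEFINE_STATIC_KEY_FALSE(devmap_managed_key);

    static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
    {
            /* Only page-managed pagemap types need the slow put_page() path. */
            if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
                pgmap->type == MEMORY_DEVICE_FS_DAX)
                    static_branch_inc(&devmap_managed_key);
    }

    static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
    {
            if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
                pgmap->type == MEMORY_DEVICE_FS_DAX)
                    static_branch_dec(&devmap_managed_key);
    }
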
pfn_first()  [pgmap: argument]
   73  static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
   75          struct range *range = &pgmap->ranges[range_id];
   80          return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));

pfn_end()  [pgmap: argument]
   83  static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
   85          const struct range *range = &pgmap->ranges[range_id];

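pfn_first() skips any struct-page altmap reservation at the start of the first range; pfn_end() converts the inclusive range->end into an exclusive pfn bound. A sketch of the full bodies, filled in under the assumption that the elided lines match mainline of this era:

    static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
    {
            struct range *range = &pgmap->ranges[range_id];
            unsigned long pfn = PHYS_PFN(range->start);

            /* Only the first range carries the altmap-reserved pages. */
            if (range_id)
                    return pfn;
            return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
    }

    static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
    {
            const struct range *range = &pgmap->ranges[range_id];

            /* range->end is inclusive, so add one for an exclusive bound. */
            return (range->end + 1) >> PAGE_SHIFT;
    }
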
dev_pagemap_kill()  [pgmap: argument]
  100  static void dev_pagemap_kill(struct dev_pagemap *pgmap)
  102          if (pgmap->ops && pgmap->ops->kill)
  103                  pgmap->ops->kill(pgmap);
  105                  percpu_ref_kill(pgmap->ref);

dev_pagemap_cleanup()  [pgmap: argument]
  108  static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
  110          if (pgmap->ops && pgmap->ops->cleanup) {
  111                  pgmap->ops->cleanup(pgmap);
  113                  wait_for_completion(&pgmap->done);
  114                  percpu_ref_exit(pgmap->ref);
  120          if (pgmap->ref == &pgmap->internal_ref)
  121                  pgmap->ref = NULL;

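The elided lines 104 and 112 are almost certainly else branches: a pagemap either supplies its own kill/cleanup ops or falls back to the internal percpu ref and completion. A hedged reconstruction:

    static void dev_pagemap_kill(struct dev_pagemap *pgmap)
    {
            if (pgmap->ops && pgmap->ops->kill)
                    pgmap->ops->kill(pgmap);
            else
                    percpu_ref_kill(pgmap->ref);
    }

    static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
    {
            if (pgmap->ops && pgmap->ops->cleanup) {
                    pgmap->ops->cleanup(pgmap);
            } else {
                    /* Wait for the final percpu ref drop (see line 182). */
                    wait_for_completion(&pgmap->done);
                    percpu_ref_exit(pgmap->ref);
            }
            /* Reset so the caller may reuse the pgmap with a fresh ref. */
            if (pgmap->ref == &pgmap->internal_ref)
                    pgmap->ref = NULL;
    }
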
pageunmap_range()  [pgmap: argument]
  124  static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
  126          struct range *range = &pgmap->ranges[range_id];
  131          first_page = pfn_to_page(pfn_first(pgmap, range_id));
  139          if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
  144                                     pgmap_altmap(pgmap));

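The type check at line 139 exists because MEMORY_DEVICE_PRIVATE ranges were never mapped into the kernel's linear map, so teardown differs by type. A sketch of that branch, assuming line 144 is the tail of an arch_remove_memory() call with the v5.10-era signature:

    if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
            /* Device-private memory has no direct mapping to undo. */
            __remove_pages(PHYS_PFN(range->start),
                           PHYS_PFN(range_len(range)), NULL);
    } else {
            arch_remove_memory(nid, range->start, range_len(range),
                               pgmap_altmap(pgmap));
    }
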
memunmap_pages()  [pgmap: argument]
  153  void memunmap_pages(struct dev_pagemap *pgmap)
  158          dev_pagemap_kill(pgmap);
  159          for (i = 0; i < pgmap->nr_range; i++)
  160                  for_each_device_pfn(pfn, pgmap, i)
  162          dev_pagemap_cleanup(pgmap);
  164          for (i = 0; i < pgmap->nr_range; i++)
  165                  pageunmap_range(pgmap, i);
  167          WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
  168          devmap_managed_enable_put(pgmap);

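Teardown order is the point of this function: kill the ref so no new references are taken, drop the one page reference each device pfn still holds, wait in cleanup until the ref hits zero, and only then unmap the ranges. Reconstructed as a sketch, with the elided line 161 assumed to be the per-pfn put_page():

    void memunmap_pages(struct dev_pagemap *pgmap)
    {
            unsigned long pfn;
            int i;

            dev_pagemap_kill(pgmap);
            for (i = 0; i < pgmap->nr_range; i++)
                    for_each_device_pfn(pfn, pgmap, i)
                            put_page(pfn_to_page(pfn)); /* assumed line 161 */
            dev_pagemap_cleanup(pgmap);

            for (i = 0; i < pgmap->nr_range; i++)
                    pageunmap_range(pgmap, i);

            WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
            devmap_managed_enable_put(pgmap);
    }
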
dev_pagemap_percpu_release()  [pgmap: local]
  179          struct dev_pagemap *pgmap =
  182          complete(&pgmap->done);

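The local at line 179 is recovered from the percpu_ref pointer with container_of(), which is why the internal ref must be embedded in struct dev_pagemap. The likely full body:

    static void dev_pagemap_percpu_release(struct percpu_ref *ref)
    {
            struct dev_pagemap *pgmap =
                    container_of(ref, struct dev_pagemap, internal_ref);

            /* Wakes dev_pagemap_cleanup()'s wait_for_completion(). */
            complete(&pgmap->done);
    }
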
pagemap_range()  [pgmap: argument]
  185  static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
  188          struct range *range = &pgmap->ranges[range_id];
  192          if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
  221                                  PHYS_PFN(range->end), pgmap, GFP_KERNEL));
  246          if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
  279                                  PHYS_PFN(range_len(range)), pgmap);
  280          percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
  281                          - pfn_first(pgmap, range_id));

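Lines 279-281 are the interesting tail: once the memmap for the range is initialized, the pagemap takes one percpu reference per device pfn, which memunmap_pages() later balances with one put_page() per pfn. A sketch of that tail, assuming line 279 closes a memmap_init_zone_device() call:

    memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                            PHYS_PFN(range->start),
                            PHYS_PFN(range_len(range)), pgmap);
    percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
                    - pfn_first(pgmap, range_id));
    return 0;
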
memremap_pages()  [pgmap: argument]
  299  void *memremap_pages(struct dev_pagemap *pgmap, int nid)
  302                  .altmap = pgmap_altmap(pgmap),
  305          const int nr_range = pgmap->nr_range;
  311          switch (pgmap->type) {
  317                  if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
  321                  if (!pgmap->ops->page_free) {
  325                  if (!pgmap->owner) {
  343                  WARN(1, "Invalid pgmap type %d\n", pgmap->type);
  347          if (!pgmap->ref) {
  348                  if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
  351                  init_completion(&pgmap->done);
  352                  error = percpu_ref_init(&pgmap->internal_ref,
  356                  pgmap->ref = &pgmap->internal_ref;
  358                  if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
  364          devmap_managed_enable_get(pgmap);
  371          pgmap->nr_range = 0;
  374                  error = pagemap_range(pgmap, &params, i, nid);
  377                  pgmap->nr_range++;
  381                  memunmap_pages(pgmap);
  382                  pgmap->nr_range = nr_range;
  386          return __va(pgmap->ranges[0].start);

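Putting the pieces together: a caller fills in type, owner/ops as the switch at line 311 demands, plus at least one range, then calls memremap_pages(). A hypothetical sketch; everything named my_* is illustrative, not kernel API, and MEMORY_DEVICE_GENERIC is assumed to suit a simple directly-addressable device:

    #include <linux/err.h>
    #include <linux/memremap.h>

    static struct dev_pagemap my_pgmap;

    /* Map [base, base + size) of device memory as ZONE_DEVICE pages. */
    static void *my_map_device_memory(phys_addr_t base, size_t size, int nid)
    {
            void *vaddr;

            my_pgmap.type = MEMORY_DEVICE_GENERIC;
            my_pgmap.range.start = base;
            my_pgmap.range.end = base + size - 1;   /* end is inclusive */
            my_pgmap.nr_range = 1;

            vaddr = memremap_pages(&my_pgmap, nid);
            if (IS_ERR(vaddr))
                    return NULL;
            return vaddr;   /* __va() of the first range's start (line 386) */
    }
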
devm_memremap_pages()  [pgmap: argument]
  410  void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  415          ret = memremap_pages(pgmap, dev_to_node(dev));
  420                          pgmap);

devm_memunmap_pages()  [pgmap: argument]
  427  void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
  429          devm_release_action(dev, devm_memremap_pages_release, pgmap);

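The devm_ wrappers tie the pagemap's lifetime to a struct device, so drivers rarely call memunmap_pages() directly. Typical usage inside a hypothetical probe():

    static int my_probe(struct device *dev)
    {
            struct dev_pagemap *pgmap;
            void *addr;

            pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
            if (!pgmap)
                    return -ENOMEM;
            /* ... fill in pgmap->type, pgmap->range, pgmap->nr_range ... */

            addr = devm_memremap_pages(dev, pgmap);
            if (IS_ERR(addr))
                    return PTR_ERR(addr);

            /* Torn down automatically when dev is released, or early
             * via devm_memunmap_pages(dev, pgmap). */
            return 0;
    }
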
get_dev_pagemap()  [pgmap: argument]
  455                  struct dev_pagemap *pgmap)
  462          if (pgmap) {
  463                  if (phys >= pgmap->range.start && phys <= pgmap->range.end)
  464                          return pgmap;
  465                  put_dev_pagemap(pgmap);
  470          pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
  471          if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
  472                  pgmap = NULL;
  475          return pgmap;

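get_dev_pagemap() doubles as a cached lookup: pass in the pgmap from a previous call and it is reused if the new pfn still falls within its range (lines 462-464); otherwise it is put and the xarray is consulted. A hypothetical walker exploiting that:

    /* Hypothetical helper: count device pfns in [start_pfn, end_pfn). */
    static unsigned long count_device_pfns(unsigned long start_pfn,
                                           unsigned long end_pfn)
    {
            struct dev_pagemap *pgmap = NULL;
            unsigned long pfn, n = 0;

            for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                    /* Reuses the previous pgmap if pfn is still in range. */
                    pgmap = get_dev_pagemap(pfn, pgmap);
                    if (pgmap)
                            n++;
            }
            if (pgmap)
                    put_dev_pagemap(pgmap); /* drop the last held ref */
            return n;
    }
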
free_devmap_managed_page()
  514          page->pgmap->ops->page_free(page);

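Line 514 is the endpoint of the managed-page path enabled at lines 44-54: when the last user drops a managed page, it is handed back through the driver's page_free() callback rather than to the page allocator. A hypothetical dev_pagemap_ops table for a MEMORY_DEVICE_PRIVATE driver, matching what memremap_pages() enforces at lines 317-325 (my_* names are illustrative):

    #include <linux/memremap.h>
    #include <linux/mm.h>

    static void my_page_free(struct page *page)
    {
            /* Return the backing device block to the driver's allocator. */
    }

    static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
    {
            /* Migrate the faulting device-private page back to RAM. */
            return VM_FAULT_SIGBUS; /* placeholder */
    }

    static const struct dev_pagemap_ops my_pgmap_ops = {
            .page_free      = my_page_free,
            .migrate_to_ram = my_migrate_to_ram,
    };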