/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/wait_bit.h>

static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))

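/*
 * A worked example of the iteration above (hypothetical numbers, for
 * illustration only): a 12-page resource whose first pfn sits one page
 * past a large power-of-2 boundary is visited as chunks of 1, 2, 4, 4
 * and 1 pages; every chunk is a naturally aligned power-of-2 span,
 * which is what a multi-order radix tree entry requires:
 *
 *	pgoff  0: phys_pgoff ...1, 12 pages left -> order 0 (1 page)
 *	pgoff  1: phys_pgoff ...2, 11 pages left -> order 1 (2 pages)
 *	pgoff  3: phys_pgoff ...4,  9 pages left -> order 2 (4 pages)
 *	pgoff  7: phys_pgoff ...8,  5 pages left -> order 2 (4 pages)
 *	pgoff 11: phys_pgoff ...c,  1 page  left -> order 0 (1 page)
 */
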
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
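
/*
 * An illustrative sketch of such a callback (hypothetical driver code,
 * not part of this file); my_migrate_to_ram() stands in for whatever
 * driver-specific machinery copies the data back into a system page,
 * and only the error contract matters here:
 *
 *	static vm_fault_t my_page_fault(struct vm_area_struct *vma,
 *			unsigned long addr, const struct page *page,
 *			unsigned int flags, pmd_t *pmdp)
 *	{
 *		if (my_migrate_to_ram(vma, addr, page))
 *			return VM_FAULT_SIGBUS;
 *		return 0;
 *	}
 */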

static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff) {
		if (pgoff >= end_pgoff)
			break;
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	}
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

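/*
 * Walking every pfn of a large (potentially terabytes) device mapping
 * can take a long time; yield the CPU every 1024 pfns so the loop does
 * not provoke soft-lockup warnings.
 */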
static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;

	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
			&pgmap->altmap : NULL);
	kasan_remove_zero_shadow(__va(align_start), align_size);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res, -1);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true
 *
 * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
 *    time (or devm release event). The expected order of events is that ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 *
 * An illustrative caller sketch follows the function body below.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;
	struct dev_pagemap *conflict_pgmap;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!pgmap->ref)
		return ERR_PTR(-EINVAL);

	pgmap->dev = dev;

	mutex_lock(&pgmap_lock);
	error = 0;

	foreach_order_pgoff(res, order, pgoff) {
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, pgmap);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = kasan_add_zero_shadow(__va(align_start), align_size);
	if (error) {
		mem_hotplug_done();
		goto err_kasan;
	}

	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT, altmap);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, pgmap) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list.  Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(pgmap->ref);
	}

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(align_start), align_size);
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res, pgoff);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
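
/*
 * An illustrative caller of devm_memremap_pages() (hypothetical driver
 * code; error handling and percpu_ref setup elided, and the names pgmap,
 * drv_ref and devres are placeholders):
 *
 *	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
 *	pgmap->res = *devres;		// host memory range to map
 *	pgmap->ref = &drv_ref;		// a live percpu_ref
 *	pgmap->type = MEMORY_DEVICE_FS_DAX;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */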

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}
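
/*
 * Layout sketch of a device range that carries an altmap (illustrative):
 *
 *	base          base + reserve            base + reserve + free
 *	|<- reserved ->|<- free (memmap alloc) ->|<- pfn_to_page() valid ->|
 *
 * The 'free' pfns are handed out to back the struct pages themselves,
 * which is why vmem_altmap_offset() marks the first pfn with a usable
 * struct page.
 */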

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up the page map for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
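
/*
 * A typical caller pattern (sketch): pfn walkers cache the pgmap across
 * iterations so the radix lookup is only paid once per pagemap:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (; pfn < end_pfn; pfn++) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;
 *		// ... use pfn_to_page(pfn) ...
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */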

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);
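
/*
 * Expected pairing (sketch): a driver whose pagemap supplies a
 * page_free() callback holds an ops reference for the pagemap's
 * lifetime, so the static key stays enabled while its pages can go
 * idle:
 *
 *	dev_pagemap_get_ops();
 *	... devm_memremap_pages() and normal operation ...
 *	dev_pagemap_put_ops();
 */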

void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount drops to 1 the page is free (idle), and the
	 * refcount is stable because nobody else holds a reference on
	 * the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */