// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"

struct devres_node {
	struct list_head		entry;
	dr_release_t			release;
#ifdef CONFIG_DEBUG_DEVRES
	const char			*name;
	size_t				size;
#endif
};

struct devres {
	struct devres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
	struct devres_node		node[2];
	void				*id;
	int				color;
	/* -- 8 pointers */
};

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */
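
/*
 * Illustrative note (not part of the original file): when the kernel is
 * built with CONFIG_DEBUG_DEVRES, the "log" parameter above gates the
 * devres_log() output and should be writable at runtime, presumably via
 * something like:
 *
 *	echo 1 > /sys/module/devres/parameters/log
 *
 * The exact sysfs path depends on how this object is built, so treat the
 * path as an assumption.
 */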

/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group *node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static __always_inline struct devres *alloc_dr(dr_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres), size,
					&tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

#ifdef CONFIG_DEBUG_DEVRES
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
 * devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			dr_match_t match, void *match_data,
			void (*fn)(struct device *, void *, void *),
			void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
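
/*
 * Usage sketch (illustrative, not part of the original file): the typical
 * pattern pairs devres_alloc() with devres_add().  All my_* identifiers
 * below are hypothetical.
 *
 *	static void my_res_release(struct device *dev, void *res)
 *	{
 *		struct my_res *r = res;
 *
 *		my_hw_disable(r->base);
 *	}
 *
 *	static int my_probe_step(struct device *dev)
 *	{
 *		struct my_res *r;
 *
 *		r = devres_alloc(my_res_release, sizeof(*r), GFP_KERNEL);
 *		if (!r)
 *			return -ENOMEM;
 *		r->base = my_hw_enable(dev);
 *		devres_add(dev, r);
 *		return 0;
 *	}
 *
 * On driver detach, my_res_release() runs and the buffer is freed without
 * any explicit cleanup in the driver.
 */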

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void *devres_get(struct device *dev, void *new_res,
		 dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
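
/*
 * Usage sketch (illustrative): devres_get() implements "find or add"
 * atomically.  A caller that may race with itself allocates a candidate
 * first and lets devres_get() discard it if an equivalent entry already
 * exists.  my_res_release and struct my_res are hypothetical.
 *
 *	void *cand, *cur;
 *
 *	cand = devres_alloc(my_res_release, sizeof(struct my_res), GFP_KERNEL);
 *	if (!cand)
 *		return -ENOMEM;
 *	cur = devres_get(dev, cand, NULL, NULL);
 *
 * After the call only @cur may be used: @cand has either been registered
 * (and equals @cur) or already freed.
 */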

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void *devres_remove(struct device *dev, dr_release_t release,
		    dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end).  That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end.  The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, an address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id.  If @id is NULL, look for the latest. */
static struct devres_group *find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
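
/*
 * Usage sketch (illustrative): a group brackets a span of managed
 * resources so a failed multi-step initialization can be unwound in one
 * call.  The my_setup_*() helpers are hypothetical and are assumed to
 * register devres entries of their own.
 *
 *	if (!devres_open_group(dev, NULL, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	err = my_setup_irqs(dev);
 *	if (!err)
 *		err = my_setup_dma(dev);
 *	if (err) {
 *		devres_release_group(dev, NULL);
 *		return err;
 *	}
 *	devres_close_group(dev, NULL);
 *	return 0;
 *
 * devres_release_group() releases everything acquired since the matching
 * devres_open_group().
 */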

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
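
/*
 * Usage sketch (illustrative): devm_add_action() turns an arbitrary undo
 * step into a managed resource.  my_disable()/my_hw_disable() and @hw are
 * hypothetical; the devm_add_action_or_reset() wrapper in the device
 * header runs the action immediately when registration fails.
 *
 *	static void my_disable(void *data)
 *	{
 *		my_hw_disable(data);
 *	}
 *
 *	err = devm_add_action(dev, my_disable, hw);
 *	if (err) {
 *		my_hw_disable(hw);
 *		return err;
 *	}
 */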

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action().  Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, the guaranteed alignment matches that of a plain
 * kmalloc() buffer (ARCH_KMALLOC_MINALIGN).
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons.
	 * The initial implementation did not support kmalloc, only kzalloc.
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
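
/*
 * Usage sketch (illustrative): devm_kmalloc() and the devm_kzalloc()
 * wrapper in the device header replace the kmalloc()/kfree() pair in
 * probe paths.  struct my_priv is hypothetical.
 *
 *	struct my_priv *priv;
 *
 *	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 *	if (!priv)
 *		return -ENOMEM;
 *
 * No kfree() is needed; the buffer is released on driver detach.
 */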

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section otherwise it falls back to
 * devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);
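
/*
 * Usage sketch (illustrative): when the argument is a string literal that
 * lives in .rodata, devm_kstrdup_const() returns it unduplicated, which is
 * why devm_kfree() below tolerates .rodata pointers.
 *
 *	const char *name;
 *
 *	name = devm_kstrdup_const(dev, "default-name", GFP_KERNEL);
 *
 * No allocation happens in that case; for any other string the call falls
 * back to devm_kstrdup().
 */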

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
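
/*
 * Usage sketch (illustrative): devm_kasprintf() suits derived names whose
 * lifetime should track the device.  @idx is hypothetical.
 *
 *	label = devm_kasprintf(dev, GFP_KERNEL, "%s-ch%d", dev_name(dev), idx);
 *	if (!label)
 *		return -ENOMEM;
 */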

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special case: pointer to a string in .rodata returned by
	 * devm_kstrdup_const().
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
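
/*
 * Usage sketch (illustrative): a page-order allocation that is normally
 * freed on detach, but may also be dropped early with devm_free_pages().
 *
 *	unsigned long addr;
 *
 *	addr = devm_get_free_pages(dev, GFP_KERNEL, 2);
 *	if (!addr)
 *		return -ENOMEM;
 *
 * The order (here 2, i.e. four pages) is recorded in the devres entry, so
 * an early devm_free_pages(dev, addr) does not need it again.
 */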

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
		size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
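
/*
 * Usage sketch (illustrative): callers normally go through the
 * devm_alloc_percpu() macro in the device header, which derives size and
 * alignment from a type.  struct my_stats is hypothetical.
 *
 *	struct my_stats __percpu *stats;
 *
 *	stats = devm_alloc_percpu(dev, struct my_stats);
 *	if (!stats)
 *		return -ENOMEM;
 */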

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);