1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * platform.c - platform 'pseudo' bus for legacy devices
4  *
5  * Copyright (c) 2002-3 Patrick Mochel
6  * Copyright (c) 2002-3 Open Source Development Labs
7  *
8  * Please see Documentation/driver-api/driver-model/platform.rst for more
9  * information.
10  */
11 
12 #include <linux/string.h>
13 #include <linux/platform_device.h>
14 #include <linux/of_device.h>
15 #include <linux/of_irq.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/memblock.h>
20 #include <linux/err.h>
21 #include <linux/slab.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/pm_domain.h>
24 #include <linux/idr.h>
25 #include <linux/acpi.h>
26 #include <linux/clk/clk-conf.h>
27 #include <linux/limits.h>
28 #include <linux/property.h>
29 #include <linux/kmemleak.h>
30 
31 #include "base.h"
32 #include "power/power.h"
33 
34 /* For automatically allocated device IDs */
35 static DEFINE_IDA(platform_devid_ida);
36 
37 struct device platform_bus = {
38 	.init_name	= "platform",
39 };
40 EXPORT_SYMBOL_GPL(platform_bus);
41 
42 /**
43  * platform_get_resource - get a resource for a device
44  * @dev: platform device
45  * @type: resource type
46  * @num: resource index
47  */
48 struct resource *platform_get_resource(struct platform_device *dev,
49 				       unsigned int type, unsigned int num)
50 {
51 	int i;
52 
53 	for (i = 0; i < dev->num_resources; i++) {
54 		struct resource *r = &dev->resource[i];
55 
56 		if (type == resource_type(r) && num-- == 0)
57 			return r;
58 	}
59 	return NULL;
60 }
61 EXPORT_SYMBOL_GPL(platform_get_resource);
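
/*
 * Illustrative sketch only (the "fpga" names are invented, not taken from a
 * real driver): a typical probe() pairs platform_get_resource() with
 * devm_ioremap_resource() to map the first MEM resource.
 *
 *	static int fpga_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		void __iomem *base;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		base = devm_ioremap_resource(&pdev->dev, res);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *		return 0;
 *	}
 */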
62 
63 /**
64  * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
65  *				    device
66  *
67  * @pdev: platform device to use for both memory resource lookup and
68  *        resource management
69  * @index: resource index
70  */
71 #ifdef CONFIG_HAS_IOMEM
72 void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
73 					     unsigned int index)
74 {
75 	struct resource *res;
76 
77 	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
78 	return devm_ioremap_resource(&pdev->dev, res);
79 }
80 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
81 #endif /* CONFIG_HAS_IOMEM */
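
/*
 * Usage note (sketch, index 0 assumed for illustration): with this helper the
 * lookup-and-map pair shown in the earlier example collapses to a single call.
 *
 *	base = devm_platform_ioremap_resource(pdev, 0);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */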
82 
83 static int __platform_get_irq(struct platform_device *dev, unsigned int num)
84 {
85 #ifdef CONFIG_SPARC
86 	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
87 	if (!dev || num >= dev->archdata.num_irqs)
88 		return -ENXIO;
89 	return dev->archdata.irqs[num];
90 #else
91 	struct resource *r;
92 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
93 		int ret;
94 
95 		ret = of_irq_get(dev->dev.of_node, num);
96 		if (ret > 0 || ret == -EPROBE_DEFER)
97 			return ret;
98 	}
99 
100 	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
101 	if (has_acpi_companion(&dev->dev)) {
102 		if (r && r->flags & IORESOURCE_DISABLED) {
103 			int ret;
104 
105 			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
106 			if (ret)
107 				return ret;
108 		}
109 	}
110 
111 	/*
112 	 * The resources may pass trigger flags to the irqs that need
113 	 * to be set up. It so happens that the trigger flags for
114 	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
115 	 * settings.
116 	 */
117 	if (r && r->flags & IORESOURCE_BITS) {
118 		struct irq_data *irqd;
119 
120 		irqd = irq_get_irq_data(r->start);
121 		if (!irqd)
122 			return -ENXIO;
123 		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
124 	}
125 
126 	if (r)
127 		return r->start;
128 
129 	/*
130 	 * For the index 0 interrupt, allow falling back to GpioInt
131 	 * resources. While a device could have both Interrupt and GpioInt
132 	 * resources, making this fallback ambiguous, in many common cases
133 	 * the device will only expose one IRQ, and this fallback
134 	 * allows a common code path across either kind of resource.
135 	 */
136 	if (num == 0 && has_acpi_companion(&dev->dev)) {
137 		int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
138 
139 		/* Our callers expect -ENXIO for missing IRQs. */
140 		if (ret >= 0 || ret == -EPROBE_DEFER)
141 			return ret;
142 	}
143 
144 	return -ENXIO;
145 #endif
146 }
147 
148 /**
149  * platform_get_irq - get an IRQ for a device
150  * @dev: platform device
151  * @num: IRQ number index
152  *
153  * Gets an IRQ for a platform device and prints an error message if finding the
154  * IRQ fails. Device drivers should check the return value for errors so as to
155  * not pass a negative integer value to the request_irq() APIs.
156  *
157  * Example:
158  *		int irq = platform_get_irq(pdev, 0);
159  *		if (irq < 0)
160  *			return irq;
161  *
162  * Return: IRQ number on success, negative error number on failure.
163  */
164 int platform_get_irq(struct platform_device *dev, unsigned int num)
165 {
166 	int ret;
167 
168 	ret = __platform_get_irq(dev, num);
169 	if (ret < 0 && ret != -EPROBE_DEFER)
170 		dev_err(&dev->dev, "IRQ index %u not found\n", num);
171 
172 	return ret;
173 }
174 EXPORT_SYMBOL_GPL(platform_get_irq);
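
/*
 * Illustrative follow-up to the kernel-doc example above (handler name and
 * cookie are hypothetical): the value returned here is what gets handed to
 * the request_irq() family.
 *
 *	irq = platform_get_irq(pdev, 0);
 *	if (irq < 0)
 *		return irq;
 *	ret = devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
 *			       dev_name(&pdev->dev), priv);
 *	if (ret)
 *		return ret;
 */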
175 
176 /**
177  * platform_get_irq_optional - get an optional IRQ for a device
178  * @dev: platform device
179  * @num: IRQ number index
180  *
181  * Gets an IRQ for a platform device. Device drivers should check the return
182  * value for errors so as to not pass a negative integer value to the
183  * request_irq() APIs. This is the same as platform_get_irq(), except that it
184  * does not print an error message if an IRQ can not be obtained.
185  *
186  * Example:
187  *		int irq = platform_get_irq_optional(pdev, 0);
188  *		if (irq < 0)
189  *			return irq;
190  *
191  * Return: IRQ number on success, negative error number on failure.
192  */
193 int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
194 {
195 	return __platform_get_irq(dev, num);
196 }
197 EXPORT_SYMBOL_GPL(platform_get_irq_optional);
198 
199 /**
200  * platform_irq_count - Count the number of IRQs a platform device uses
201  * @dev: platform device
202  *
203  * Return: Number of IRQs a platform device uses or -EPROBE_DEFER
204  */
205 int platform_irq_count(struct platform_device *dev)
206 {
207 	int ret, nr = 0;
208 
209 	while ((ret = __platform_get_irq(dev, nr)) >= 0)
210 		nr++;
211 
212 	if (ret == -EPROBE_DEFER)
213 		return ret;
214 
215 	return nr;
216 }
217 EXPORT_SYMBOL_GPL(platform_irq_count);
218 
219 /**
220  * platform_get_resource_byname - get a resource for a device by name
221  * @dev: platform device
222  * @type: resource type
223  * @name: resource name
224  */
225 struct resource *platform_get_resource_byname(struct platform_device *dev,
226 					      unsigned int type,
227 					      const char *name)
228 {
229 	int i;
230 
231 	for (i = 0; i < dev->num_resources; i++) {
232 		struct resource *r = &dev->resource[i];
233 
234 		if (unlikely(!r->name))
235 			continue;
236 
237 		if (type == resource_type(r) && !strcmp(r->name, name))
238 			return r;
239 	}
240 	return NULL;
241 }
242 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
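
/*
 * Sketch only (the "ctrl" resource name is an assumption, not a convention
 * defined here): named lookup is useful when a device provides several
 * resources of the same type.
 *
 *	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
 *	if (!res)
 *		return -ENODEV;
 */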
243 
244 static int __platform_get_irq_byname(struct platform_device *dev,
245 				     const char *name)
246 {
247 	struct resource *r;
248 
249 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
250 		int ret;
251 
252 		ret = of_irq_get_byname(dev->dev.of_node, name);
253 		if (ret > 0 || ret == -EPROBE_DEFER)
254 			return ret;
255 	}
256 
257 	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
258 	if (r)
259 		return r->start;
260 
261 	return -ENXIO;
262 }
263 
264 /**
265  * platform_get_irq_byname - get an IRQ for a device by name
266  * @dev: platform device
267  * @name: IRQ name
268  *
269  * Get an IRQ like platform_get_irq(), but by name rather than by index.
270  *
271  * Return: IRQ number on success, negative error number on failure.
272  */
273 int platform_get_irq_byname(struct platform_device *dev, const char *name)
274 {
275 	int ret;
276 
277 	ret = __platform_get_irq_byname(dev, name);
278 	if (ret < 0 && ret != -EPROBE_DEFER)
279 		dev_err(&dev->dev, "IRQ %s not found\n", name);
280 
281 	return ret;
282 }
283 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
284 
285 /**
286  * platform_get_irq_byname_optional - get an optional IRQ for a device by name
287  * @dev: platform device
288  * @name: IRQ name
289  *
290  * Get an optional IRQ by name like platform_get_irq_byname(), except that it
291  * does not print an error message if an IRQ can not be obtained.
292  *
293  * Return: IRQ number on success, negative error number on failure.
294  */
295 int platform_get_irq_byname_optional(struct platform_device *dev,
296 				     const char *name)
297 {
298 	return __platform_get_irq_byname(dev, name);
299 }
300 EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
301 
302 /**
303  * platform_add_devices - add a number of platform devices
304  * @devs: array of platform devices to add
305  * @num: number of platform devices in array
306  */
307 int platform_add_devices(struct platform_device **devs, int num)
308 {
309 	int i, ret = 0;
310 
311 	for (i = 0; i < num; i++) {
312 		ret = platform_device_register(devs[i]);
313 		if (ret) {
314 			while (--i >= 0)
315 				platform_device_unregister(devs[i]);
316 			break;
317 		}
318 	}
319 
320 	return ret;
321 }
322 EXPORT_SYMBOL_GPL(platform_add_devices);
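
/*
 * Minimal board-file style sketch (device definitions omitted, names
 * invented): registers all devices in one call, unwinding on failure as
 * implemented above.
 *
 *	static struct platform_device *board_devs[] __initdata = {
 *		&board_uart_device,
 *		&board_eth_device,
 *	};
 *
 *	ret = platform_add_devices(board_devs, ARRAY_SIZE(board_devs));
 */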
323 
324 struct platform_object {
325 	struct platform_device pdev;
326 	char name[];
327 };
328 
329 /*
330  * Set up default DMA mask for platform devices if they weren't
331  * previously set by the architecture / DT.
332  */
333 static void setup_pdev_dma_masks(struct platform_device *pdev)
334 {
335 	if (!pdev->dev.coherent_dma_mask)
336 		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
337 	if (!pdev->dma_mask)
338 		pdev->dma_mask = DMA_BIT_MASK(32);
339 	if (!pdev->dev.dma_mask)
340 		pdev->dev.dma_mask = &pdev->dma_mask;
341 }
342 
343 /**
344  * platform_device_put - destroy a platform device
345  * @pdev: platform device to free
346  *
347  * Free all memory associated with a platform device.  This function must
348  * _only_ be externally called in error cases.  All other usage is a bug.
349  */
350 void platform_device_put(struct platform_device *pdev)
351 {
352 	if (!IS_ERR_OR_NULL(pdev))
353 		put_device(&pdev->dev);
354 }
355 EXPORT_SYMBOL_GPL(platform_device_put);
356 
357 static void platform_device_release(struct device *dev)
358 {
359 	struct platform_object *pa = container_of(dev, struct platform_object,
360 						  pdev.dev);
361 
362 	of_device_node_put(&pa->pdev.dev);
363 	kfree(pa->pdev.dev.platform_data);
364 	kfree(pa->pdev.mfd_cell);
365 	kfree(pa->pdev.resource);
366 	kfree(pa->pdev.driver_override);
367 	kfree(pa);
368 }
369 
370 /**
371  * platform_device_alloc - create a platform device
372  * @name: base name of the device we're adding
373  * @id: instance id
374  *
375  * Create a platform device object which can have other objects attached
376  * to it, and which will have attached objects freed when it is released.
377  */
378 struct platform_device *platform_device_alloc(const char *name, int id)
379 {
380 	struct platform_object *pa;
381 
382 	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
383 	if (pa) {
384 		strcpy(pa->name, name);
385 		pa->pdev.name = pa->name;
386 		pa->pdev.id = id;
387 		device_initialize(&pa->pdev.dev);
388 		pa->pdev.dev.release = platform_device_release;
389 		setup_pdev_dma_masks(&pa->pdev);
390 	}
391 
392 	return pa ? &pa->pdev : NULL;
393 }
394 EXPORT_SYMBOL_GPL(platform_device_alloc);
395 
396 /**
397  * platform_device_add_resources - add resources to a platform device
398  * @pdev: platform device allocated by platform_device_alloc to add resources to
399  * @res: set of resources that needs to be allocated for the device
400  * @num: number of resources
401  *
402  * Add a copy of the resources to the platform device.  The memory
403  * associated with the resources will be freed when the platform device is
404  * released.
405  */
406 int platform_device_add_resources(struct platform_device *pdev,
407 				  const struct resource *res, unsigned int num)
408 {
409 	struct resource *r = NULL;
410 
411 	if (res) {
412 		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
413 		if (!r)
414 			return -ENOMEM;
415 	}
416 
417 	kfree(pdev->resource);
418 	pdev->resource = r;
419 	pdev->num_resources = num;
420 	return 0;
421 }
422 EXPORT_SYMBOL_GPL(platform_device_add_resources);
423 
424 /**
425  * platform_device_add_data - add platform-specific data to a platform device
426  * @pdev: platform device allocated by platform_device_alloc to add platform data to
427  * @data: platform specific data for this platform device
428  * @size: size of platform specific data
429  *
430  * Add a copy of platform specific data to the platform device's
431  * platform_data pointer.  The memory associated with the platform data
432  * will be freed when the platform device is released.
433  */
434 int platform_device_add_data(struct platform_device *pdev, const void *data,
435 			     size_t size)
436 {
437 	void *d = NULL;
438 
439 	if (data) {
440 		d = kmemdup(data, size, GFP_KERNEL);
441 		if (!d)
442 			return -ENOMEM;
443 	}
444 
445 	kfree(pdev->dev.platform_data);
446 	pdev->dev.platform_data = d;
447 	return 0;
448 }
449 EXPORT_SYMBOL_GPL(platform_device_add_data);
450 
451 /**
452  * platform_device_add_properties - add built-in properties to a platform device
453  * @pdev: platform device to add properties to
454  * @properties: null terminated array of properties to add
455  *
456  * The function will take a deep copy of @properties and attach the copy to the
457  * platform device. The memory associated with properties will be freed when the
458  * platform device is released.
459  */
460 int platform_device_add_properties(struct platform_device *pdev,
461 				   const struct property_entry *properties)
462 {
463 	return device_add_properties(&pdev->dev, properties);
464 }
465 EXPORT_SYMBOL_GPL(platform_device_add_properties);
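
/*
 * Sketch, assuming made-up property names: @properties must be a
 * zero-terminated array of property_entry, typically built with the
 * PROPERTY_ENTRY_* macros from <linux/property.h>.
 *
 *	static const struct property_entry foo_props[] = {
 *		PROPERTY_ENTRY_U32("clock-frequency", 400000),
 *		PROPERTY_ENTRY_STRING("label", "demo"),
 *		{ }
 *	};
 *
 *	ret = platform_device_add_properties(pdev, foo_props);
 */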
466 
467 /**
468  * platform_device_add - add a platform device to device hierarchy
469  * @pdev: platform device we're adding
470  *
471  * This is part 2 of platform_device_register(), though may be called
472  * separately _iff_ pdev was allocated by platform_device_alloc().
473  */
474 int platform_device_add(struct platform_device *pdev)
475 {
476 	int i, ret;
477 
478 	if (!pdev)
479 		return -EINVAL;
480 
481 	if (!pdev->dev.parent)
482 		pdev->dev.parent = &platform_bus;
483 
484 	pdev->dev.bus = &platform_bus_type;
485 
486 	switch (pdev->id) {
487 	default:
488 		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
489 		break;
490 	case PLATFORM_DEVID_NONE:
491 		dev_set_name(&pdev->dev, "%s", pdev->name);
492 		break;
493 	case PLATFORM_DEVID_AUTO:
494 		/*
495 		 * Automatically allocated device ID. We mark it as such so
496 		 * that we remember it must be freed, and we append a suffix
497 		 * to avoid namespace collision with explicit IDs.
498 		 */
499 		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
500 		if (ret < 0)
501 			goto err_out;
502 		pdev->id = ret;
503 		pdev->id_auto = true;
504 		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
505 		break;
506 	}
507 
508 	for (i = 0; i < pdev->num_resources; i++) {
509 		struct resource *p, *r = &pdev->resource[i];
510 
511 		if (r->name == NULL)
512 			r->name = dev_name(&pdev->dev);
513 
514 		p = r->parent;
515 		if (!p) {
516 			if (resource_type(r) == IORESOURCE_MEM)
517 				p = &iomem_resource;
518 			else if (resource_type(r) == IORESOURCE_IO)
519 				p = &ioport_resource;
520 		}
521 
522 		if (p) {
523 			ret = insert_resource(p, r);
524 			if (ret) {
525 				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
526 				goto failed;
527 			}
528 		}
529 	}
530 
531 	pr_debug("Registering platform device '%s'. Parent at %s\n",
532 		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));
533 
534 	ret = device_add(&pdev->dev);
535 	if (ret == 0)
536 		return ret;
537 
538  failed:
539 	if (pdev->id_auto) {
540 		ida_simple_remove(&platform_devid_ida, pdev->id);
541 		pdev->id = PLATFORM_DEVID_AUTO;
542 	}
543 
544 	while (--i >= 0) {
545 		struct resource *r = &pdev->resource[i];
546 		if (r->parent)
547 			release_resource(r);
548 	}
549 
550  err_out:
551 	return ret;
552 }
553 EXPORT_SYMBOL_GPL(platform_device_add);
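
/*
 * Typical dynamic creation sequence, sketched with invented names; note the
 * error path uses platform_device_put(), not platform_device_unregister(),
 * because the device was never successfully added.
 *
 *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_AUTO);
 *	if (!pdev)
 *		return -ENOMEM;
 *	ret = platform_device_add_resources(pdev, foo_res, ARRAY_SIZE(foo_res));
 *	if (!ret)
 *		ret = platform_device_add_data(pdev, &foo_pdata, sizeof(foo_pdata));
 *	if (!ret)
 *		ret = platform_device_add(pdev);
 *	if (ret) {
 *		platform_device_put(pdev);
 *		return ret;
 *	}
 */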
554 
555 /**
556  * platform_device_del - remove a platform-level device
557  * @pdev: platform device we're removing
558  *
559  * Note that this function will also release all memory- and port-based
560  * resources owned by the device (@dev->resource).  This function must
561  * _only_ be externally called in error cases.  All other usage is a bug.
562  */
563 void platform_device_del(struct platform_device *pdev)
564 {
565 	int i;
566 
567 	if (!IS_ERR_OR_NULL(pdev)) {
568 		device_del(&pdev->dev);
569 
570 		if (pdev->id_auto) {
571 			ida_simple_remove(&platform_devid_ida, pdev->id);
572 			pdev->id = PLATFORM_DEVID_AUTO;
573 		}
574 
575 		for (i = 0; i < pdev->num_resources; i++) {
576 			struct resource *r = &pdev->resource[i];
577 			if (r->parent)
578 				release_resource(r);
579 		}
580 	}
581 }
582 EXPORT_SYMBOL_GPL(platform_device_del);
583 
584 /**
585  * platform_device_register - add a platform-level device
586  * @pdev: platform device we're adding
587  */
588 int platform_device_register(struct platform_device *pdev)
589 {
590 	device_initialize(&pdev->dev);
591 	setup_pdev_dma_masks(pdev);
592 	return platform_device_add(pdev);
593 }
594 EXPORT_SYMBOL_GPL(platform_device_register);
595 
596 /**
597  * platform_device_unregister - unregister a platform-level device
598  * @pdev: platform device we're unregistering
599  *
600  * Unregistration is done in 2 steps. First we release all resources
601  * and remove the device from the subsystem, then we drop the reference
602  * count by calling platform_device_put().
603  */
604 void platform_device_unregister(struct platform_device *pdev)
605 {
606 	platform_device_del(pdev);
607 	platform_device_put(pdev);
608 }
609 EXPORT_SYMBOL_GPL(platform_device_unregister);
610 
611 /**
612  * platform_device_register_full - add a platform-level device with
613  * resources and platform-specific data
614  *
615  * @pdevinfo: data used to create device
616  *
617  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
618  */
619 struct platform_device *platform_device_register_full(
620 		const struct platform_device_info *pdevinfo)
621 {
622 	int ret = -ENOMEM;
623 	struct platform_device *pdev;
624 
625 	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
626 	if (!pdev)
627 		return ERR_PTR(-ENOMEM);
628 
629 	pdev->dev.parent = pdevinfo->parent;
630 	pdev->dev.fwnode = pdevinfo->fwnode;
631 	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
632 	pdev->dev.of_node_reused = pdevinfo->of_node_reused;
633 
634 	if (pdevinfo->dma_mask) {
635 		/*
636 		 * This memory isn't freed when the device is put,
637 		 * I don't have a nice idea for that though.  Conceptually
638 		 * dma_mask in struct device should not be a pointer.
639 		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
640 		 */
641 		pdev->dev.dma_mask =
642 			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
643 		if (!pdev->dev.dma_mask)
644 			goto err;
645 
646 		kmemleak_ignore(pdev->dev.dma_mask);
647 
648 		*pdev->dev.dma_mask = pdevinfo->dma_mask;
649 		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
650 	}
651 
652 	ret = platform_device_add_resources(pdev,
653 			pdevinfo->res, pdevinfo->num_res);
654 	if (ret)
655 		goto err;
656 
657 	ret = platform_device_add_data(pdev,
658 			pdevinfo->data, pdevinfo->size_data);
659 	if (ret)
660 		goto err;
661 
662 	if (pdevinfo->properties) {
663 		ret = platform_device_add_properties(pdev,
664 						     pdevinfo->properties);
665 		if (ret)
666 			goto err;
667 	}
668 
669 	ret = platform_device_add(pdev);
670 	if (ret) {
671 err:
672 		ACPI_COMPANION_SET(&pdev->dev, NULL);
673 		kfree(pdev->dev.dma_mask);
674 		platform_device_put(pdev);
675 		return ERR_PTR(ret);
676 	}
677 
678 	return pdev;
679 }
680 EXPORT_SYMBOL_GPL(platform_device_register_full);
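
/*
 * Sketch of the descriptor-based interface (field values and names are
 * placeholders):
 *
 *	struct platform_device_info pdevinfo = {
 *		.name		= "foo",
 *		.id		= PLATFORM_DEVID_NONE,
 *		.res		= foo_res,
 *		.num_res	= ARRAY_SIZE(foo_res),
 *		.data		= &foo_pdata,
 *		.size_data	= sizeof(foo_pdata),
 *		.dma_mask	= DMA_BIT_MASK(32),
 *	};
 *
 *	pdev = platform_device_register_full(&pdevinfo);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */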
681 
682 static int platform_drv_probe(struct device *_dev)
683 {
684 	struct platform_driver *drv = to_platform_driver(_dev->driver);
685 	struct platform_device *dev = to_platform_device(_dev);
686 	int ret;
687 
688 	ret = of_clk_set_defaults(_dev->of_node, false);
689 	if (ret < 0)
690 		return ret;
691 
692 	ret = dev_pm_domain_attach(_dev, true);
693 	if (ret)
694 		goto out;
695 
696 	if (drv->probe) {
697 		ret = drv->probe(dev);
698 		if (ret)
699 			dev_pm_domain_detach(_dev, true);
700 	}
701 
702 out:
703 	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
704 		dev_warn(_dev, "probe deferral not supported\n");
705 		ret = -ENXIO;
706 	}
707 
708 	return ret;
709 }
710 
711 static int platform_drv_probe_fail(struct device *_dev)
712 {
713 	return -ENXIO;
714 }
715 
716 static int platform_drv_remove(struct device *_dev)
717 {
718 	struct platform_driver *drv = to_platform_driver(_dev->driver);
719 	struct platform_device *dev = to_platform_device(_dev);
720 	int ret = 0;
721 
722 	if (drv->remove)
723 		ret = drv->remove(dev);
724 	dev_pm_domain_detach(_dev, true);
725 
726 	return ret;
727 }
728 
729 static void platform_drv_shutdown(struct device *_dev)
730 {
731 	struct platform_driver *drv = to_platform_driver(_dev->driver);
732 	struct platform_device *dev = to_platform_device(_dev);
733 
734 	if (drv->shutdown)
735 		drv->shutdown(dev);
736 }
737 
738 /**
739  * __platform_driver_register - register a driver for platform-level devices
740  * @drv: platform driver structure
741  * @owner: owning module/driver
742  */
743 int __platform_driver_register(struct platform_driver *drv,
744 				struct module *owner)
745 {
746 	drv->driver.owner = owner;
747 	drv->driver.bus = &platform_bus_type;
748 	drv->driver.probe = platform_drv_probe;
749 	drv->driver.remove = platform_drv_remove;
750 	drv->driver.shutdown = platform_drv_shutdown;
751 
752 	return driver_register(&drv->driver);
753 }
754 EXPORT_SYMBOL_GPL(__platform_driver_register);
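
/*
 * Most drivers use the platform_driver_register() wrapper (or the
 * module_platform_driver() helper), which supplies THIS_MODULE, rather than
 * calling this directly. A minimal sketch with invented names:
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *		.driver	= {
 *			.name = "foo",
 *			.of_match_table = foo_of_match,
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */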
755 
756 /**
757  * platform_driver_unregister - unregister a driver for platform-level devices
758  * @drv: platform driver structure
759  */
760 void platform_driver_unregister(struct platform_driver *drv)
761 {
762 	driver_unregister(&drv->driver);
763 }
764 EXPORT_SYMBOL_GPL(platform_driver_unregister);
765 
766 /**
767  * __platform_driver_probe - register driver for non-hotpluggable device
768  * @drv: platform driver structure
769  * @probe: the driver probe routine, probably from an __init section
770  * @module: module which will be the owner of the driver
771  *
772  * Use this instead of platform_driver_register() when you know the device
773  * is not hotpluggable and has already been registered, and you want to
774  * remove its run-once probe() infrastructure from memory after the driver
775  * has bound to the device.
776  *
777  * One typical use for this would be with drivers for controllers integrated
778  * into system-on-chip processors, where the controller devices have been
779  * configured as part of board setup.
780  *
781  * Note that this is incompatible with deferred probing.
782  *
783  * Returns zero if the driver registered and bound to a device, else returns
784  * a negative error code and with the driver not registered.
785  */
786 int __init_or_module __platform_driver_probe(struct platform_driver *drv,
787 		int (*probe)(struct platform_device *), struct module *module)
788 {
789 	int retval, code;
790 
791 	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
792 		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
793 			 drv->driver.name, __func__);
794 		return -EINVAL;
795 	}
796 
797 	/*
798 	 * We have to run our probes synchronously because we check if
799 	 * we find any devices to bind to and exit with error if there
800 	 * are any.
801 	 */
802 	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
803 
804 	/*
805 	 * Prevent driver from requesting probe deferral to avoid further
806 	 * futile probe attempts.
807 	 */
808 	drv->prevent_deferred_probe = true;
809 
810 	/* make sure driver won't have bind/unbind attributes */
811 	drv->driver.suppress_bind_attrs = true;
812 
813 	/* temporary section violation during probe() */
814 	drv->probe = probe;
815 	retval = code = __platform_driver_register(drv, module);
816 
817 	/*
818 	 * Fixup that section violation, being paranoid about code scanning
819 	 * the list of drivers in order to probe new devices.  Check to see
820 	 * if the probe was successful, and make sure any forced probes of
821 	 * new devices fail.
822 	 */
823 	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
824 	drv->probe = NULL;
825 	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
826 		retval = -ENODEV;
827 	drv->driver.probe = platform_drv_probe_fail;
828 	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
829 
830 	if (code != retval)
831 		platform_driver_unregister(drv);
832 	return retval;
833 }
834 EXPORT_SYMBOL_GPL(__platform_driver_probe);
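
/*
 * Callers normally go through the platform_driver_probe() wrapper, which
 * supplies THIS_MODULE. Sketch with invented names; the probe routine may
 * live in an __init section because it is never called again:
 *
 *	static int __init foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *	}
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = { .name = "foo" },
 *	};
 *
 *	return platform_driver_probe(&foo_driver, foo_probe);
 */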
835 
836 /**
837  * __platform_create_bundle - register driver and create corresponding device
838  * @driver: platform driver structure
839  * @probe: the driver probe routine, probably from an __init section
840  * @res: set of resources that needs to be allocated for the device
841  * @n_res: number of resources
842  * @data: platform specific data for this platform device
843  * @size: size of platform specific data
844  * @module: module which will be the owner of the driver
845  *
846  * Use this in legacy-style modules that probe hardware directly and
847  * register a single platform device and corresponding platform driver.
848  *
849  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
850  */
851 struct platform_device * __init_or_module __platform_create_bundle(
852 			struct platform_driver *driver,
853 			int (*probe)(struct platform_device *),
854 			struct resource *res, unsigned int n_res,
855 			const void *data, size_t size, struct module *module)
856 {
857 	struct platform_device *pdev;
858 	int error;
859 
860 	pdev = platform_device_alloc(driver->driver.name, -1);
861 	if (!pdev) {
862 		error = -ENOMEM;
863 		goto err_out;
864 	}
865 
866 	error = platform_device_add_resources(pdev, res, n_res);
867 	if (error)
868 		goto err_pdev_put;
869 
870 	error = platform_device_add_data(pdev, data, size);
871 	if (error)
872 		goto err_pdev_put;
873 
874 	error = platform_device_add(pdev);
875 	if (error)
876 		goto err_pdev_put;
877 
878 	error = __platform_driver_probe(driver, probe, module);
879 	if (error)
880 		goto err_pdev_del;
881 
882 	return pdev;
883 
884 err_pdev_del:
885 	platform_device_del(pdev);
886 err_pdev_put:
887 	platform_device_put(pdev);
888 err_out:
889 	return ERR_PTR(error);
890 }
891 EXPORT_SYMBOL_GPL(__platform_create_bundle);
892 
893 /**
894  * __platform_register_drivers - register an array of platform drivers
895  * @drivers: an array of drivers to register
896  * @count: the number of drivers to register
897  * @owner: module owning the drivers
898  *
899  * Registers platform drivers specified by an array. On failure to register a
900  * driver, all previously registered drivers will be unregistered. Callers of
901  * this API should use platform_unregister_drivers() to unregister drivers in
902  * the reverse order.
903  *
904  * Returns: 0 on success or a negative error code on failure.
905  */
906 int __platform_register_drivers(struct platform_driver * const *drivers,
907 				unsigned int count, struct module *owner)
908 {
909 	unsigned int i;
910 	int err;
911 
912 	for (i = 0; i < count; i++) {
913 		pr_debug("registering platform driver %ps\n", drivers[i]);
914 
915 		err = __platform_driver_register(drivers[i], owner);
916 		if (err < 0) {
917 			pr_err("failed to register platform driver %ps: %d\n",
918 			       drivers[i], err);
919 			goto error;
920 		}
921 	}
922 
923 	return 0;
924 
925 error:
926 	while (i--) {
927 		pr_debug("unregistering platform driver %ps\n", drivers[i]);
928 		platform_driver_unregister(drivers[i]);
929 	}
930 
931 	return err;
932 }
933 EXPORT_SYMBOL_GPL(__platform_register_drivers);
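
/*
 * Sketch (driver definitions omitted, names invented); the usual entry point
 * is the platform_register_drivers() wrapper, which passes THIS_MODULE:
 *
 *	static struct platform_driver * const foo_drivers[] = {
 *		&foo_core_driver,
 *		&foo_phy_driver,
 *	};
 *
 *	ret = platform_register_drivers(foo_drivers, ARRAY_SIZE(foo_drivers));
 *	...
 *	platform_unregister_drivers(foo_drivers, ARRAY_SIZE(foo_drivers));
 */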
934 
935 /**
936  * platform_unregister_drivers - unregister an array of platform drivers
937  * @drivers: an array of drivers to unregister
938  * @count: the number of drivers to unregister
939  *
940  * Unregisters platform drivers specified by an array. This is typically used
941  * to complement an earlier call to platform_register_drivers(). Drivers are
942  * unregistered in the reverse order in which they were registered.
943  */
944 void platform_unregister_drivers(struct platform_driver * const *drivers,
945 				 unsigned int count)
946 {
947 	while (count--) {
948 		pr_debug("unregistering platform driver %ps\n", drivers[count]);
949 		platform_driver_unregister(drivers[count]);
950 	}
951 }
952 EXPORT_SYMBOL_GPL(platform_unregister_drivers);
953 
954 /* modalias support enables more hands-off userspace setup:
955  * (a) environment variable lets new-style hotplug events work once system is
956  *     fully running:  "modprobe $MODALIAS"
957  * (b) sysfs attribute lets new-style coldplug recover from hotplug events
958  *     mishandled before system is fully running:  "modprobe $(cat modalias)"
959  */
960 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
961 			     char *buf)
962 {
963 	struct platform_device	*pdev = to_platform_device(dev);
964 	int len;
965 
966 	len = of_device_modalias(dev, buf, PAGE_SIZE);
967 	if (len != -ENODEV)
968 		return len;
969 
970 	len = acpi_device_modalias(dev, buf, PAGE_SIZE -1);
971 	if (len != -ENODEV)
972 		return len;
973 
974 	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
975 
976 	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
977 }
978 static DEVICE_ATTR_RO(modalias);
979 
980 static ssize_t driver_override_store(struct device *dev,
981 				     struct device_attribute *attr,
982 				     const char *buf, size_t count)
983 {
984 	struct platform_device *pdev = to_platform_device(dev);
985 	char *driver_override, *old, *cp;
986 
987 	/* We need to keep extra room for a newline */
988 	if (count >= (PAGE_SIZE - 1))
989 		return -EINVAL;
990 
991 	driver_override = kstrndup(buf, count, GFP_KERNEL);
992 	if (!driver_override)
993 		return -ENOMEM;
994 
995 	cp = strchr(driver_override, '\n');
996 	if (cp)
997 		*cp = '\0';
998 
999 	device_lock(dev);
1000 	old = pdev->driver_override;
1001 	if (strlen(driver_override)) {
1002 		pdev->driver_override = driver_override;
1003 	} else {
1004 		kfree(driver_override);
1005 		pdev->driver_override = NULL;
1006 	}
1007 	device_unlock(dev);
1008 
1009 	kfree(old);
1010 
1011 	return count;
1012 }
1013 
1014 static ssize_t driver_override_show(struct device *dev,
1015 				    struct device_attribute *attr, char *buf)
1016 {
1017 	struct platform_device *pdev = to_platform_device(dev);
1018 	ssize_t len;
1019 
1020 	device_lock(dev);
1021 	len = sprintf(buf, "%s\n", pdev->driver_override);
1022 	device_unlock(dev);
1023 	return len;
1024 }
1025 static DEVICE_ATTR_RW(driver_override);
1026 
1027 
1028 static struct attribute *platform_dev_attrs[] = {
1029 	&dev_attr_modalias.attr,
1030 	&dev_attr_driver_override.attr,
1031 	NULL,
1032 };
1033 ATTRIBUTE_GROUPS(platform_dev);
1034 
1035 static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
1036 {
1037 	struct platform_device	*pdev = to_platform_device(dev);
1038 	int rc;
1039 
1040 	/* Some devices have extra OF data and an OF-style MODALIAS */
1041 	rc = of_device_uevent_modalias(dev, env);
1042 	if (rc != -ENODEV)
1043 		return rc;
1044 
1045 	rc = acpi_device_uevent_modalias(dev, env);
1046 	if (rc != -ENODEV)
1047 		return rc;
1048 
1049 	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
1050 			pdev->name);
1051 	return 0;
1052 }
1053 
1054 static const struct platform_device_id *platform_match_id(
1055 			const struct platform_device_id *id,
1056 			struct platform_device *pdev)
1057 {
1058 	while (id->name[0]) {
1059 		if (strcmp(pdev->name, id->name) == 0) {
1060 			pdev->id_entry = id;
1061 			return id;
1062 		}
1063 		id++;
1064 	}
1065 	return NULL;
1066 }
1067 
1068 /**
1069  * platform_match - bind platform device to platform driver.
1070  * @dev: device.
1071  * @drv: driver.
1072  *
1073  * Platform device IDs are assumed to be encoded like this:
1074  * "<name><instance>", where <name> is a short description of the type of
1075  * device, like "pci" or "floppy", and <instance> is the enumerated
1076  * instance of the device, like '0' or '42'.  Driver IDs are simply
1077  * "<name>".  So, extract the <name> from the platform_device structure,
1078  * and compare it against the name of the driver. Return whether they match
1079  * or not.
1080  */
1081 static int platform_match(struct device *dev, struct device_driver *drv)
1082 {
1083 	struct platform_device *pdev = to_platform_device(dev);
1084 	struct platform_driver *pdrv = to_platform_driver(drv);
1085 
1086 	/* When driver_override is set, only bind to the matching driver */
1087 	if (pdev->driver_override)
1088 		return !strcmp(pdev->driver_override, drv->name);
1089 
1090 	/* Attempt an OF style match first */
1091 	if (of_driver_match_device(dev, drv))
1092 		return 1;
1093 
1094 	/* Then try ACPI style match */
1095 	if (acpi_driver_match_device(dev, drv))
1096 		return 1;
1097 
1098 	/* Then try to match against the id table */
1099 	if (pdrv->id_table)
1100 		return platform_match_id(pdrv->id_table, pdev) != NULL;
1101 
1102 	/* fall-back to driver name match */
1103 	return (strcmp(pdev->name, drv->name) == 0);
1104 }
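
/*
 * Sketch of an id-table match as used by the third branch above (entries and
 * names are illustrative):
 *
 *	static const struct platform_device_id foo_id_table[] = {
 *		{ "foo-v1", (kernel_ulong_t)&foo_v1_info },
 *		{ "foo-v2", (kernel_ulong_t)&foo_v2_info },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(platform, foo_id_table);
 *
 * A matching entry is also recorded in pdev->id_entry, so probe() can fetch
 * it via platform_get_device_id(pdev).
 */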
1105 
1106 #ifdef CONFIG_PM_SLEEP
1107 
1108 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
1109 {
1110 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
1111 	struct platform_device *pdev = to_platform_device(dev);
1112 	int ret = 0;
1113 
1114 	if (dev->driver && pdrv->suspend)
1115 		ret = pdrv->suspend(pdev, mesg);
1116 
1117 	return ret;
1118 }
1119 
1120 static int platform_legacy_resume(struct device *dev)
1121 {
1122 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
1123 	struct platform_device *pdev = to_platform_device(dev);
1124 	int ret = 0;
1125 
1126 	if (dev->driver && pdrv->resume)
1127 		ret = pdrv->resume(pdev);
1128 
1129 	return ret;
1130 }
1131 
1132 #endif /* CONFIG_PM_SLEEP */
1133 
1134 #ifdef CONFIG_SUSPEND
1135 
1136 int platform_pm_suspend(struct device *dev)
1137 {
1138 	struct device_driver *drv = dev->driver;
1139 	int ret = 0;
1140 
1141 	if (!drv)
1142 		return 0;
1143 
1144 	if (drv->pm) {
1145 		if (drv->pm->suspend)
1146 			ret = drv->pm->suspend(dev);
1147 	} else {
1148 		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
1149 	}
1150 
1151 	return ret;
1152 }
1153 
1154 int platform_pm_resume(struct device *dev)
1155 {
1156 	struct device_driver *drv = dev->driver;
1157 	int ret = 0;
1158 
1159 	if (!drv)
1160 		return 0;
1161 
1162 	if (drv->pm) {
1163 		if (drv->pm->resume)
1164 			ret = drv->pm->resume(dev);
1165 	} else {
1166 		ret = platform_legacy_resume(dev);
1167 	}
1168 
1169 	return ret;
1170 }
1171 
1172 #endif /* CONFIG_SUSPEND */
1173 
1174 #ifdef CONFIG_HIBERNATE_CALLBACKS
1175 
1176 int platform_pm_freeze(struct device *dev)
1177 {
1178 	struct device_driver *drv = dev->driver;
1179 	int ret = 0;
1180 
1181 	if (!drv)
1182 		return 0;
1183 
1184 	if (drv->pm) {
1185 		if (drv->pm->freeze)
1186 			ret = drv->pm->freeze(dev);
1187 	} else {
1188 		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
1189 	}
1190 
1191 	return ret;
1192 }
1193 
1194 int platform_pm_thaw(struct device *dev)
1195 {
1196 	struct device_driver *drv = dev->driver;
1197 	int ret = 0;
1198 
1199 	if (!drv)
1200 		return 0;
1201 
1202 	if (drv->pm) {
1203 		if (drv->pm->thaw)
1204 			ret = drv->pm->thaw(dev);
1205 	} else {
1206 		ret = platform_legacy_resume(dev);
1207 	}
1208 
1209 	return ret;
1210 }
1211 
1212 int platform_pm_poweroff(struct device *dev)
1213 {
1214 	struct device_driver *drv = dev->driver;
1215 	int ret = 0;
1216 
1217 	if (!drv)
1218 		return 0;
1219 
1220 	if (drv->pm) {
1221 		if (drv->pm->poweroff)
1222 			ret = drv->pm->poweroff(dev);
1223 	} else {
1224 		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
1225 	}
1226 
1227 	return ret;
1228 }
1229 
1230 int platform_pm_restore(struct device *dev)
1231 {
1232 	struct device_driver *drv = dev->driver;
1233 	int ret = 0;
1234 
1235 	if (!drv)
1236 		return 0;
1237 
1238 	if (drv->pm) {
1239 		if (drv->pm->restore)
1240 			ret = drv->pm->restore(dev);
1241 	} else {
1242 		ret = platform_legacy_resume(dev);
1243 	}
1244 
1245 	return ret;
1246 }
1247 
1248 #endif /* CONFIG_HIBERNATE_CALLBACKS */
1249 
1250 int platform_dma_configure(struct device *dev)
1251 {
1252 	enum dev_dma_attr attr;
1253 	int ret = 0;
1254 
1255 	if (dev->of_node) {
1256 		ret = of_dma_configure(dev, dev->of_node, true);
1257 	} else if (has_acpi_companion(dev)) {
1258 		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
1259 		ret = acpi_dma_configure(dev, attr);
1260 	}
1261 
1262 	return ret;
1263 }
1264 
1265 static const struct dev_pm_ops platform_dev_pm_ops = {
1266 	.runtime_suspend = pm_generic_runtime_suspend,
1267 	.runtime_resume = pm_generic_runtime_resume,
1268 	USE_PLATFORM_PM_SLEEP_OPS
1269 };
1270 
1271 struct bus_type platform_bus_type = {
1272 	.name		= "platform",
1273 	.dev_groups	= platform_dev_groups,
1274 	.match		= platform_match,
1275 	.uevent		= platform_uevent,
1276 	.dma_configure	= platform_dma_configure,
1277 	.pm		= &platform_dev_pm_ops,
1278 };
1279 EXPORT_SYMBOL_GPL(platform_bus_type);
1280 
1281 /**
1282  * platform_find_device_by_driver - Find a platform device with a given
1283  * driver.
1284  * @start: The device to start the search from.
1285  * @drv: The device driver to look for.
1286  */
1287 struct device *platform_find_device_by_driver(struct device *start,
1288 					      const struct device_driver *drv)
1289 {
1290 	return bus_find_device(&platform_bus_type, start, drv,
1291 			       (void *)platform_match);
1292 }
1293 EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
1294 
1295 int __init platform_bus_init(void)
1296 {
1297 	int error;
1298 
1299 	early_platform_cleanup();
1300 
1301 	error = device_register(&platform_bus);
1302 	if (error) {
1303 		put_device(&platform_bus);
1304 		return error;
1305 	}
1306 	error =  bus_register(&platform_bus_type);
1307 	if (error)
1308 		device_unregister(&platform_bus);
1309 	of_platform_register_reconfig_notifier();
1310 	return error;
1311 }
1312 
1313 static __initdata LIST_HEAD(early_platform_driver_list);
1314 static __initdata LIST_HEAD(early_platform_device_list);
1315 
1316 /**
1317  * early_platform_driver_register - register early platform driver
1318  * @epdrv: early_platform driver structure
1319  * @buf: string passed from early_param()
1320  *
1321  * Helper function for early_platform_init() / early_platform_init_buffer()
1322  */
1323 int __init early_platform_driver_register(struct early_platform_driver *epdrv,
1324 					  char *buf)
1325 {
1326 	char *tmp;
1327 	int n;
1328 
1329 	/* Simply add the driver to the end of the global list.
1330 	 * Drivers will by default be put on the list in compiled-in order.
1331 	 */
1332 	if (!epdrv->list.next) {
1333 		INIT_LIST_HEAD(&epdrv->list);
1334 		list_add_tail(&epdrv->list, &early_platform_driver_list);
1335 	}
1336 
1337 	/* If the user has specified a device then make sure the driver
1338 	 * gets prioritized. The driver of the last device specified on the
1339 	 * command line will be put first on the list.
1340 	 */
1341 	n = strlen(epdrv->pdrv->driver.name);
1342 	if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
1343 		list_move(&epdrv->list, &early_platform_driver_list);
1344 
1345 		/* Allow passing parameters after device name */
1346 		if (buf[n] == '\0' || buf[n] == ',')
1347 			epdrv->requested_id = -1;
1348 		else {
1349 			epdrv->requested_id = simple_strtoul(&buf[n + 1],
1350 							     &tmp, 10);
1351 
1352 			if (buf[n] != '.' || (tmp == &buf[n + 1])) {
1353 				epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
1354 				n = 0;
1355 			} else
1356 				n += strcspn(&buf[n + 1], ",") + 1;
1357 		}
1358 
1359 		if (buf[n] == ',')
1360 			n++;
1361 
1362 		if (epdrv->bufsize) {
1363 			memcpy(epdrv->buffer, &buf[n],
1364 			       min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
1365 			epdrv->buffer[epdrv->bufsize - 1] = '\0';
1366 		}
1367 	}
1368 
1369 	return 0;
1370 }
1371 
1372 /**
1373  * early_platform_add_devices - adds a number of early platform devices
1374  * @devs: array of early platform devices to add
1375  * @num: number of early platform devices in array
1376  *
1377  * Used by early architecture code to register early platform devices and
1378  * their platform data.
1379  */
1380 void __init early_platform_add_devices(struct platform_device **devs, int num)
1381 {
1382 	struct device *dev;
1383 	int i;
1384 
1385 	/* simply add the devices to list */
1386 	for (i = 0; i < num; i++) {
1387 		dev = &devs[i]->dev;
1388 
1389 		if (!dev->devres_head.next) {
1390 			pm_runtime_early_init(dev);
1391 			INIT_LIST_HEAD(&dev->devres_head);
1392 			list_add_tail(&dev->devres_head,
1393 				      &early_platform_device_list);
1394 		}
1395 	}
1396 }
1397 
1398 /**
1399  * early_platform_driver_register_all - register early platform drivers
1400  * @class_str: string to identify early platform driver class
1401  *
1402  * Used by architecture code to register all early platform drivers
1403  * for a certain class. If omitted then only early platform drivers
1404  * with matching kernel command line class parameters will be registered.
1405  */
1406 void __init early_platform_driver_register_all(char *class_str)
1407 {
1408 	/* The "class_str" parameter may or may not be present on the kernel
1409 	 * command line. If it is present then there may be more than one
1410 	 * matching parameter.
1411 	 *
1412 	 * Since we register our early platform drivers using early_param()
1413 	 * we need to make sure that they also get registered in the case
1414 	 * when the parameter is missing from the kernel command line.
1415 	 *
1416 	 * We use parse_early_options() to make sure the early_param() gets
1417 	 * called at least once. The early_param() may be called more than
1418 	 * once since the name of the preferred device may be specified on
1419 	 * the kernel command line. early_platform_driver_register() handles
1420 	 * this case for us.
1421 	 */
1422 	parse_early_options(class_str);
1423 }
1424 
1425 /**
1426  * early_platform_match - find early platform device matching driver
1427  * @epdrv: early platform driver structure
1428  * @id: id to match against
1429  */
1430 static struct platform_device * __init
1431 early_platform_match(struct early_platform_driver *epdrv, int id)
1432 {
1433 	struct platform_device *pd;
1434 
1435 	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
1436 		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
1437 			if (pd->id == id)
1438 				return pd;
1439 
1440 	return NULL;
1441 }
1442 
1443 /**
1444  * early_platform_left - check if early platform driver has matching devices
1445  * @epdrv: early platform driver structure
1446  * @id: return true if id or above exists
1447  */
1448 static int __init early_platform_left(struct early_platform_driver *epdrv,
1449 				       int id)
1450 {
1451 	struct platform_device *pd;
1452 
1453 	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
1454 		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
1455 			if (pd->id >= id)
1456 				return 1;
1457 
1458 	return 0;
1459 }
1460 
1461 /**
1462  * early_platform_driver_probe_id - probe drivers matching class_str and id
1463  * @class_str: string to identify early platform driver class
1464  * @id: id to match against
1465  * @nr_probe: number of platform devices to successfully probe before exiting
1466  */
1467 static int __init early_platform_driver_probe_id(char *class_str,
1468 						 int id,
1469 						 int nr_probe)
1470 {
1471 	struct early_platform_driver *epdrv;
1472 	struct platform_device *match;
1473 	int match_id;
1474 	int n = 0;
1475 	int left = 0;
1476 
1477 	list_for_each_entry(epdrv, &early_platform_driver_list, list) {
1478 		/* only use drivers matching our class_str */
1479 		if (strcmp(class_str, epdrv->class_str))
1480 			continue;
1481 
1482 		if (id == -2) {
1483 			match_id = epdrv->requested_id;
1484 			left = 1;
1485 
1486 		} else {
1487 			match_id = id;
1488 			left += early_platform_left(epdrv, id);
1489 
1490 			/* skip requested id */
1491 			switch (epdrv->requested_id) {
1492 			case EARLY_PLATFORM_ID_ERROR:
1493 			case EARLY_PLATFORM_ID_UNSET:
1494 				break;
1495 			default:
1496 				if (epdrv->requested_id == id)
1497 					match_id = EARLY_PLATFORM_ID_UNSET;
1498 			}
1499 		}
1500 
1501 		switch (match_id) {
1502 		case EARLY_PLATFORM_ID_ERROR:
1503 			pr_warn("%s: unable to parse %s parameter\n",
1504 				class_str, epdrv->pdrv->driver.name);
1505 			/* fall-through */
1506 		case EARLY_PLATFORM_ID_UNSET:
1507 			match = NULL;
1508 			break;
1509 		default:
1510 			match = early_platform_match(epdrv, match_id);
1511 		}
1512 
1513 		if (match) {
1514 			/*
1515 			 * Set up a sensible init_name to enable
1516 			 * dev_name() and others to be used before the
1517 			 * rest of the driver core is initialized.
1518 			 */
1519 			if (!match->dev.init_name && slab_is_available()) {
1520 				if (match->id != -1)
1521 					match->dev.init_name =
1522 						kasprintf(GFP_KERNEL, "%s.%d",
1523 							  match->name,
1524 							  match->id);
1525 				else
1526 					match->dev.init_name =
1527 						kasprintf(GFP_KERNEL, "%s",
1528 							  match->name);
1529 
1530 				if (!match->dev.init_name)
1531 					return -ENOMEM;
1532 			}
1533 
1534 			if (epdrv->pdrv->probe(match))
1535 				pr_warn("%s: unable to probe %s early.\n",
1536 					class_str, match->name);
1537 			else
1538 				n++;
1539 		}
1540 
1541 		if (n >= nr_probe)
1542 			break;
1543 	}
1544 
1545 	if (left)
1546 		return n;
1547 	else
1548 		return -ENODEV;
1549 }
1550 
1551 /**
1552  * early_platform_driver_probe - probe a class of registered drivers
1553  * @class_str: string to identify early platform driver class
1554  * @nr_probe: number of platform devices to successfully probe before exiting
1555  * @user_only: only probe user specified early platform devices
1556  *
1557  * Used by architecture code to probe registered early platform drivers
1558  * within a certain class. For probe to happen a registered early platform
1559  * device matching a registered early platform driver is needed.
1560  */
1561 int __init early_platform_driver_probe(char *class_str,
1562 				       int nr_probe,
1563 				       int user_only)
1564 {
1565 	int k, n, i;
1566 
1567 	n = 0;
1568 	for (i = -2; n < nr_probe; i++) {
1569 		k = early_platform_driver_probe_id(class_str, i, nr_probe - n);
1570 
1571 		if (k < 0)
1572 			break;
1573 
1574 		n += k;
1575 
1576 		if (user_only)
1577 			break;
1578 	}
1579 
1580 	return n;
1581 }
1582 
1583 /**
1584  * early_platform_cleanup - clean up early platform code
1585  */
1586 void __init early_platform_cleanup(void)
1587 {
1588 	struct platform_device *pd, *pd2;
1589 
1590 	/* clean up the devres list used to chain devices */
1591 	list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
1592 				 dev.devres_head) {
1593 		list_del(&pd->dev.devres_head);
1594 		memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
1595 	}
1596 }
1597 
1598