// SPDX-License-Identifier: GPL-2.0
/*
 * device.h - generic, centralized driver model
 *
 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2008-2009 Novell Inc.
 *
 * See Documentation/driver-api/driver-model/ for more information.
 */

#ifndef _DEVICE_H_
#define _DEVICE_H_

#include <linux/dev_printk.h>
#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/device/bus.h>
#include <linux/device/class.h>
#include <linux/device/driver.h>
#include <asm/device.h>

struct device;
struct device_private;
struct device_driver;
struct driver_private;
struct module;
struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;

/**
 * struct subsys_interface - interfaces to device functions
 * @name: name of the device function
 * @subsys: subsystem of the devices to attach to
 * @node: the list of functions registered at the subsystem
 * @add_dev: device hookup to device function handler
 * @remove_dev: device hookup to device function handler
 *
 * Simple interfaces attached to a subsystem. Multiple interfaces can
 * attach to a subsystem and its devices. Unlike drivers, they do not
 * exclusively claim or control devices. Interfaces usually represent
 * a specific functionality of a subsystem/class of devices.
 */
struct subsys_interface {
	const char *name;
	struct bus_type *subsys;
	struct list_head node;
	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
	void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};

int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);
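
/*
 * A minimal sketch of a subsystem interface, attached to the CPU subsystem
 * much as cpufreq-style code does; the "foo" names are illustrative only:
 *
 *	static int foo_add_dev(struct device *dev, struct subsys_interface *sif)
 *	{
 *		// called for every device on the bus, present now or added later
 *		return 0;
 *	}
 *
 *	static struct subsys_interface foo_sif = {
 *		.name		= "foo",
 *		.subsys		= &cpu_subsys,
 *		.add_dev	= foo_add_dev,
 *	};
 *
 *	// module init/exit:
 *	subsys_interface_register(&foo_sif);
 *	...
 *	subsys_interface_unregister(&foo_sif);
 */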

int subsys_system_register(struct bus_type *subsys,
			   const struct attribute_group **groups);
int subsys_virtual_register(struct bus_type *subsys,
			    const struct attribute_group **groups);

/*
 * The type of device that "struct device" is embedded in. A class
 * or bus can contain devices of different types
 * like "partitions" and "disks", "mouse" and "event".
 * This identifies the device type and carries type-specific
 * information, equivalent to the kobj_type of a kobject.
 * If "name" is specified, the uevent will contain it in
 * the DEVTYPE variable.
 */
struct device_type {
	const char *name;
	const struct attribute_group **groups;
	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
	char *(*devnode)(struct device *dev, umode_t *mode,
			 kuid_t *uid, kgid_t *gid);
	void (*release)(struct device *dev);

	const struct dev_pm_ops *pm;
};

/* interface for exporting device attributes */
struct device_attribute {
	struct attribute attr;
	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
			char *buf);
	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);
};

struct dev_ext_attribute {
	struct device_attribute attr;
	void *var;
};

ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
			  char *buf);
ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
			char *buf);
ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count);

#define DEVICE_ATTR(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = \
		__ATTR_PREALLOC(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_ADMIN_RW(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
#define DEVICE_ATTR_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
#define DEVICE_ATTR_ADMIN_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
#define DEVICE_ATTR_WO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
#define DEVICE_INT_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = \
		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
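
/*
 * Typical use of the attribute helpers above; a hedged sketch in which
 * "foo", struct foo_data and its ->val field are made up for illustration:
 *
 *	static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
 *				char *buf)
 *	{
 *		struct foo_data *data = dev_get_drvdata(dev);
 *
 *		return sysfs_emit(buf, "%d\n", data->val);
 *	}
 *
 *	static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
 *				 const char *buf, size_t count)
 *	{
 *		struct foo_data *data = dev_get_drvdata(dev);
 *
 *		if (kstrtoint(buf, 0, &data->val))
 *			return -EINVAL;
 *		return count;
 *	}
 *	static DEVICE_ATTR_RW(foo);
 *
 * DEVICE_ATTR_RW(foo) expects callbacks named foo_show()/foo_store() and
 * defines dev_attr_foo, which can then be handed to device_create_file()
 * below or listed in an attribute group.
 */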

int device_create_file(struct device *device,
		       const struct device_attribute *entry);
void device_remove_file(struct device *dev,
			const struct device_attribute *attr);
bool device_remove_file_self(struct device *dev,
			     const struct device_attribute *attr);
int __must_check device_create_bin_file(struct device *dev,
					const struct bin_attribute *attr);
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr);

/* device resource management */
typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);

#ifdef CONFIG_DEBUG_DEVRES
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
			  int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
	__devres_alloc_node(release, size, gfp, nid, #release)
#else
void *devres_alloc_node(dr_release_t release, size_t size,
			gfp_t gfp, int nid) __malloc;
static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
}
#endif

void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data);
void devres_free(void *res);
void devres_add(struct device *dev, void *res);
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data);
void *devres_get(struct device *dev, void *new_res,
		 dr_match_t match, void *match_data);
void *devres_remove(struct device *dev, dr_release_t release,
		    dr_match_t match, void *match_data);
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data);
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data);
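
/*
 * Sketch of an open-coded devres entry; most drivers use the devm_*
 * wrappers further below instead, and the "foo" names here are illustrative:
 *
 *	static void foo_release(struct device *dev, void *res)
 *	{
 *		struct foo_res *foo = res;
 *
 *		// undo whatever was set up when the entry was added
 *	}
 *
 *	struct foo_res *foo;
 *
 *	foo = devres_alloc(foo_release, sizeof(*foo), GFP_KERNEL);
 *	if (!foo)
 *		return -ENOMEM;
 *	// initialize *foo and acquire the underlying resource here
 *	devres_add(dev, foo);
 *
 * foo_release() then runs automatically when the device is detached from
 * its driver, in reverse order of registration.
 */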

/* devres group */
void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
void devres_close_group(struct device *dev, void *id);
void devres_remove_group(struct device *dev, void *id);
int devres_release_group(struct device *dev, void *id);
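
/*
 * Devres groups let a driver release a related set of managed resources
 * before the device is detached. A hedged sketch:
 *
 *	void *grp = devres_open_group(dev, NULL, GFP_KERNEL);
 *
 *	if (!grp)
 *		return -ENOMEM;
 *	// devm_* allocations made from here on belong to the group
 *	devres_close_group(dev, grp);
 *	...
 *	devres_release_group(dev, grp);	// release just this group now
 */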

/* managed devm_k.alloc/kfree for device drivers */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
void *devm_krealloc(struct device *dev, void *ptr, size_t size,
		    gfp_t gfp) __must_check;
__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
				     const char *fmt, va_list ap) __malloc;
__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
				    const char *fmt, ...) __malloc;
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
}
static inline void *devm_kmalloc_array(struct device *dev,
				       size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return devm_kmalloc(dev, bytes, flags);
}
static inline void *devm_kcalloc(struct device *dev,
				 size_t n, size_t size, gfp_t flags)
{
	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
void devm_kfree(struct device *dev, const void *p);
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
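
/*
 * The devm_* allocators tie an allocation's lifetime to the driver binding.
 * A minimal probe-time sketch (struct foo_priv and its fields are made up):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		priv->label = devm_kasprintf(&pdev->dev, GFP_KERNEL, "foo-%s",
 *					     dev_name(&pdev->dev));
 *		if (!priv->label)
 *			return -ENOMEM;
 *		platform_set_drvdata(pdev, priv);
 *		return 0;
 *	}
 *
 * Everything allocated this way is freed automatically when probe fails or
 * when the driver is later unbound; no explicit devm_kfree() is needed.
 */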

unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order);
void devm_free_pages(struct device *dev, unsigned long addr);

void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res);
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res);

void __iomem *devm_of_iomap(struct device *dev,
			    struct device_node *node, int index,
			    resource_size_t *size);
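
/*
 * Typical MMIO mapping in a platform driver, sketched with the usual
 * error handling (pdev is the probed platform_device):
 *
 *	struct resource *res;
 *	void __iomem *base;
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * devm_ioremap_resource() validates the resource, requests the region and
 * maps it; the mapping is torn down automatically on driver detach.
 */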

/* Add/remove a custom action to the devres stack */
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
void devm_release_action(struct device *dev, void (*action)(void *), void *data);

static inline int devm_add_action_or_reset(struct device *dev,
					   void (*action)(void *), void *data)
{
	int ret;

	ret = devm_add_action(dev, action, data);
	if (ret)
		action(data);

	return ret;
}
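
/*
 * devm_add_action_or_reset() is handy for one-off cleanup that has no
 * dedicated devm_* helper. Sketch (foo_enable()/foo_disable() are made up):
 *
 *	foo_enable(priv);
 *	ret = devm_add_action_or_reset(dev, foo_disable, priv);
 *	if (ret)
 *		return ret;
 *
 * If registering the action fails, the action runs immediately, so no
 * separate error path is needed to undo foo_enable().
 */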

/**
 * devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @type: Type to allocate per-cpu memory for
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
#define devm_alloc_percpu(dev, type) \
	((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
						      __alignof__(type)))

void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align);
void devm_free_percpu(struct device *dev, void __percpu *pdata);
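
/*
 * Sketch of devm_alloc_percpu() in use (struct foo_stats is illustrative):
 *
 *	struct foo_stats __percpu *stats;
 *
 *	stats = devm_alloc_percpu(dev, struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	// later: per_cpu_ptr(stats, cpu)->rx_packets, etc.
 *
 * The per-cpu memory is freed automatically on driver detach, or earlier
 * with devm_free_percpu().
 */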

struct device_dma_parameters {
	/*
	 * a low level driver may set these to teach IOMMU code about
	 * sg limitations.
	 */
	unsigned int max_segment_size;
	unsigned long segment_boundary_mask;
};

/**
 * enum device_link_state - Device link states.
 * @DL_STATE_NONE: The presence of the drivers is not being tracked.
 * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
 * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
 * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
 * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
 * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
 */
enum device_link_state {
	DL_STATE_NONE = -1,
	DL_STATE_DORMANT = 0,
	DL_STATE_AVAILABLE,
	DL_STATE_CONSUMER_PROBE,
	DL_STATE_ACTIVE,
	DL_STATE_SUPPLIER_UNBIND,
};

/*
 * Device link flags.
 *
 * STATELESS: The core will not remove this link automatically.
 * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
 * PM_RUNTIME: If set, the runtime PM framework will use this link.
 * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
 * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
 * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
 * MANAGED: The core tracks presence of supplier/consumer drivers (internal).
 * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
 */
#define DL_FLAG_STATELESS		BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER	BIT(1)
#define DL_FLAG_PM_RUNTIME		BIT(2)
#define DL_FLAG_RPM_ACTIVE		BIT(3)
#define DL_FLAG_AUTOREMOVE_SUPPLIER	BIT(4)
#define DL_FLAG_AUTOPROBE_CONSUMER	BIT(5)
#define DL_FLAG_MANAGED			BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY		BIT(7)

/**
 * enum dl_dev_state - Device driver presence tracking information.
 * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
 * @DL_DEV_PROBING: A driver is probing.
 * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
 * @DL_DEV_UNBINDING: The driver is unbinding from the device.
 */
enum dl_dev_state {
	DL_DEV_NO_DRIVER = 0,
	DL_DEV_PROBING,
	DL_DEV_DRIVER_BOUND,
	DL_DEV_UNBINDING,
};

/**
 * struct dev_links_info - Device data related to device links.
 * @suppliers: List of links to supplier devices.
 * @consumers: List of links to consumer devices.
 * @needs_suppliers: Hook to global list of devices waiting for suppliers.
 * @defer_hook: Hook to global list of devices that have deferred sync_state or
 *	deferred fw_devlink.
 * @need_for_probe: If needs_suppliers is on a list, this indicates if the
 *	suppliers are needed for probe or not.
 * @status: Driver status information.
 */
struct dev_links_info {
	struct list_head suppliers;
	struct list_head consumers;
	struct list_head needs_suppliers;
	struct list_head defer_hook;
	bool need_for_probe;
	enum dl_dev_state status;
};

/**
 * struct device - The basic device structure
 * @parent: The device's "parent" device, the device to which it is attached.
 *	In most cases, a parent device is some sort of bus or host
 *	controller. If parent is NULL, the device is a top-level device,
 *	which is not usually what you want.
 * @p: Holds the private data of the driver core portions of the device.
 *	See the comment of the struct device_private for detail.
 * @kobj: A top-level, abstract class from which other classes are derived.
 * @init_name: Initial name of the device.
 * @type: The type of device.
 *	This identifies the device type and carries type-specific
 *	information.
 * @mutex: Mutex to synchronize calls to its driver.
 * @lockdep_mutex: An optional debug lock that a subsystem can use as a
 *	peer lock to gain localized lockdep coverage of the device_lock.
 * @bus: Type of bus device is on.
 * @driver: Which driver has allocated this
 * @platform_data: Platform data specific to the device.
 *	Example: For devices on custom boards, as typical of embedded
 *	and SOC based hardware, Linux often uses platform_data to point
 *	to board-specific structures describing devices and how they
 *	are wired. That can include what ports are available, chip
 *	variants, which GPIO pins act in what additional roles, and so
 *	on. This shrinks the "Board Support Packages" (BSPs) and
 *	minimizes board-specific #ifdefs in drivers.
 * @driver_data: Private pointer for driver specific info.
 * @links: Links to suppliers and consumers of this device.
 * @power: For device power management.
 *	See Documentation/driver-api/pm/devices.rst for details.
 * @pm_domain: Provide callbacks that are executed during system suspend,
 *	hibernation, system resume and during runtime PM transitions
 *	along with subsystem-level and driver-level callbacks.
 * @em_pd: device's energy model performance domain
 * @pins: For device pin management.
 *	See Documentation/driver-api/pinctl.rst for details.
 * @msi_list: Hosts MSI descriptors
 * @msi_domain: The generic MSI domain this device is using.
 * @numa_node: NUMA node this device is close to.
 * @dma_ops: DMA mapping operations for this device.
 * @dma_mask: Dma mask (if dma'ble device).
 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
 *	hardware supports 64-bit addresses for consistent allocations
 *	such as descriptors.
 * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
 *	DMA limit than the device itself supports.
 * @dma_range_map: map for DMA memory ranges relative to that of RAM
 * @dma_parms: A low level driver may set these to teach IOMMU code about
 *	segment limitations.
 * @dma_pools: Dma pools (if dma'ble device).
 * @dma_mem: Internal for coherent mem override.
 * @cma_area: Contiguous memory area for dma allocations
 * @archdata: For arch-specific additions.
 * @of_node: Associated device tree node.
 * @fwnode: Associated device node supplied by platform firmware.
 * @devt: For creating the sysfs "dev".
 * @id: device instance
 * @devres_lock: Spinlock to protect the resource of the device.
 * @devres_head: The resources list of the device.
 * @knode_class: The node used to add the device to the class list.
 * @class: The class of the device.
 * @groups: Optional attribute groups.
 * @release: Callback to free the device after all references have
 *	gone away. This should be set by the allocator of the
 *	device (i.e. the bus driver that discovered the device).
 * @iommu_group: IOMMU group the device belongs to.
 * @iommu: Per device generic IOMMU runtime data
 *
 * @offline_disabled: If set, the device is permanently online.
 * @offline: Set after successful invocation of bus type's .offline().
 * @of_node_reused: Set if the device-tree node is shared with an ancestor
 *	device.
 * @state_synced: The hardware state of this device has been synced to match
 *	the software state of this device by calling the driver/bus
 *	sync_state() callback.
 * @dma_coherent: this particular device is dma coherent, even if the
 *	architecture supports non-coherent devices.
 * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
 *	streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
 *	and optionally (if the coherent mask is large enough) also
 *	for dma allocations. This flag is managed by the dma ops
 *	instance from ->dma_supported.
 *
 * At the lowest level, every device in a Linux system is represented by an
 * instance of struct device. The device structure contains the information
 * that the device model core needs to model the system. Most subsystems,
 * however, track additional information about the devices they host. As a
 * result, it is rare for devices to be represented by bare device structures;
 * instead, that structure, like kobject structures, is usually embedded within
 * a higher-level representation of the device.
 */
struct device {
	struct kobject kobj;
	struct device *parent;

	struct device_private *p;

	const char *init_name;		/* initial name of the device */
	const struct device_type *type;

	struct bus_type *bus;		/* type of bus device is on */
	struct device_driver *driver;	/* which driver has allocated this
					   device */
	void *platform_data;		/* Platform specific data, device
					   core doesn't touch it */
	void *driver_data;		/* Driver data, set and get with
					   dev_set_drvdata/dev_get_drvdata */
#ifdef CONFIG_PROVE_LOCKING
	struct mutex lockdep_mutex;
#endif
	struct mutex mutex;		/* mutex to synchronize calls to
					 * its driver.
					 */

	struct dev_links_info links;
	struct dev_pm_info power;
	struct dev_pm_domain *pm_domain;

#ifdef CONFIG_ENERGY_MODEL
	struct em_perf_domain *em_pd;
#endif

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct irq_domain *msi_domain;
#endif
#ifdef CONFIG_PINCTRL
	struct dev_pin_info *pins;
#endif
#ifdef CONFIG_GENERIC_MSI_IRQ
	struct list_head msi_list;
#endif
#ifdef CONFIG_DMA_OPS
	const struct dma_map_ops *dma_ops;
#endif
	u64 *dma_mask;			/* dma mask (if dma'able device) */
	u64 coherent_dma_mask;		/* Like dma_mask, but for
					   alloc_coherent mappings as
					   not all hardware supports
					   64-bit addresses for consistent
					   allocations such as descriptors. */
	u64 bus_dma_limit;		/* upstream dma constraint */
	const struct bus_dma_region *dma_range_map;

	struct device_dma_parameters *dma_parms;

	struct list_head dma_pools;	/* dma pools (if dma'ble) */

#ifdef CONFIG_DMA_DECLARE_COHERENT
	struct dma_coherent_mem *dma_mem; /* internal for coherent mem
					     override */
#endif
#ifdef CONFIG_DMA_CMA
	struct cma *cma_area;		/* contiguous memory area for dma
					   allocations */
#endif
	/* arch specific additions */
	struct dev_archdata archdata;

	struct device_node *of_node;	/* associated device tree node */
	struct fwnode_handle *fwnode;	/* firmware device node */

#ifdef CONFIG_NUMA
	int numa_node;			/* NUMA node this device is close to */
#endif
	dev_t devt;			/* dev_t, creates the sysfs "dev" */
	u32 id;				/* device instance */

	spinlock_t devres_lock;
	struct list_head devres_head;

	struct class *class;
	const struct attribute_group **groups;	/* optional groups */

	void (*release)(struct device *dev);
	struct iommu_group *iommu_group;
	struct dev_iommu *iommu;

	bool offline_disabled:1;
	bool offline:1;
	bool of_node_reused:1;
	bool state_synced:1;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	bool dma_coherent:1;
#endif
#ifdef CONFIG_DMA_OPS_BYPASS
	bool dma_ops_bypass : 1;
#endif
};

/**
 * struct device_link - Device link representation.
 * @supplier: The device on the supplier end of the link.
 * @s_node: Hook to the supplier device's list of links to consumers.
 * @consumer: The device on the consumer end of the link.
 * @c_node: Hook to the consumer device's list of links to suppliers.
 * @link_dev: device used to expose link details in sysfs
 * @status: The state of the link (with respect to the presence of drivers).
 * @flags: Link flags.
 * @rpm_active: Whether or not the consumer device is runtime-PM-active.
 * @kref: Count repeated addition of the same link.
 * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
 * @supplier_preactivated: Supplier has been made active before consumer probe.
 */
struct device_link {
	struct device *supplier;
	struct list_head s_node;
	struct device *consumer;
	struct list_head c_node;
	struct device link_dev;
	enum device_link_state status;
	u32 flags;
	refcount_t rpm_active;
	struct kref kref;
#ifdef CONFIG_SRCU
	struct rcu_head rcu_head;
#endif
	bool supplier_preactivated; /* Owned by consumer probe. */
};

static inline struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}

/**
 * device_iommu_mapped - Returns true when the device DMA is translated
 *			 by an IOMMU
 * @dev: Device to perform the check on
 */
static inline bool device_iommu_mapped(struct device *dev)
{
	return (dev->iommu_group != NULL);
}

/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>

static inline const char *dev_name(const struct device *dev)
{
	/* Use the init name until the kobject becomes available */
	if (dev->init_name)
		return dev->init_name;

	return kobject_name(&dev->kobj);
}

__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);

#ifdef CONFIG_NUMA
static inline int dev_to_node(struct device *dev)
{
	return dev->numa_node;
}
static inline void set_dev_node(struct device *dev, int node)
{
	dev->numa_node = node;
}
#else
static inline int dev_to_node(struct device *dev)
{
	return NUMA_NO_NODE;
}
static inline void set_dev_node(struct device *dev, int node)
{
}
#endif

static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	return dev->msi_domain;
#else
	return NULL;
#endif
}

static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	dev->msi_domain = d;
#endif
}

static inline void *dev_get_drvdata(const struct device *dev)
{
	return dev->driver_data;
}

static inline void dev_set_drvdata(struct device *dev, void *data)
{
	dev->driver_data = data;
}
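
/*
 * drvdata typically carries a driver's private state from probe() to its
 * other callbacks. A brief sketch (struct foo_priv is illustrative):
 *
 *	dev_set_drvdata(dev, priv);			// in probe()
 *	...
 *	struct foo_priv *priv = dev_get_drvdata(dev);	// in a later callback
 */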

static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
{
	return dev ? dev->power.subsys_data : NULL;
}

static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
{
	return dev->kobj.uevent_suppress;
}

static inline void dev_set_uevent_suppress(struct device *dev, int val)
{
	dev->kobj.uevent_suppress = val;
}

static inline int device_is_registered(struct device *dev)
{
	return dev->kobj.state_in_sysfs;
}

static inline void device_enable_async_suspend(struct device *dev)
{
	if (!dev->power.is_prepared)
		dev->power.async_suspend = true;
}

static inline void device_disable_async_suspend(struct device *dev)
{
	if (!dev->power.is_prepared)
		dev->power.async_suspend = false;
}

static inline bool device_async_suspend_enabled(struct device *dev)
{
	return !!dev->power.async_suspend;
}

static inline bool device_pm_not_required(struct device *dev)
{
	return dev->power.no_pm;
}

static inline void device_set_pm_not_required(struct device *dev)
{
	dev->power.no_pm = true;
}

static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
	dev->power.syscore = val;
#endif
}

static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
{
	dev->power.driver_flags = flags;
}

static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
{
	return !!(dev->power.driver_flags & flags);
}

static inline void device_lock(struct device *dev)
{
	mutex_lock(&dev->mutex);
}

static inline int device_lock_interruptible(struct device *dev)
{
	return mutex_lock_interruptible(&dev->mutex);
}

static inline int device_trylock(struct device *dev)
{
	return mutex_trylock(&dev->mutex);
}

static inline void device_unlock(struct device *dev)
{
	mutex_unlock(&dev->mutex);
}

static inline void device_lock_assert(struct device *dev)
{
	lockdep_assert_held(&dev->mutex);
}
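
/*
 * The device lock serializes against probe/remove and other users of the
 * device. A brief sketch of the usual pattern:
 *
 *	device_lock(dev);
 *	if (dev->driver)
 *		...;		// the driver cannot unbind while the lock is held
 *	device_unlock(dev);
 *
 * device_lock_assert() documents and checks that a helper expects its
 * caller to already hold the lock.
 */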

static inline struct device_node *dev_of_node(struct device *dev)
{
	if (!IS_ENABLED(CONFIG_OF) || !dev)
		return NULL;
	return dev->of_node;
}

static inline bool dev_has_sync_state(struct device *dev)
{
	if (!dev)
		return false;
	if (dev->driver && dev->driver->sync_state)
		return true;
	if (dev->bus && dev->bus->sync_state)
		return true;
	return false;
}

/*
 * High level routines for use by the bus drivers
 */
int __must_check device_register(struct device *dev);
void device_unregister(struct device *dev);
void device_initialize(struct device *dev);
int __must_check device_add(struct device *dev);
void device_del(struct device *dev);
int device_for_each_child(struct device *dev, void *data,
			  int (*fn)(struct device *dev, void *data));
int device_for_each_child_reverse(struct device *dev, void *data,
				  int (*fn)(struct device *dev, void *data));
struct device *device_find_child(struct device *dev, void *data,
				 int (*match)(struct device *dev, void *data));
struct device *device_find_child_by_name(struct device *parent,
					 const char *name);
int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order);
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
			       kgid_t *gid, const char **tmp);
int device_is_dependent(struct device *dev, void *target);
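
/*
 * Sketch of the two-step registration, device_initialize() followed by
 * device_add(), which lets the caller grab references and set fields before
 * the device becomes visible (struct foo_device wrapping a struct device
 * named "dev" is assumed for illustration):
 *
 *	static void foo_dev_release(struct device *dev)
 *	{
 *		kfree(container_of(dev, struct foo_device, dev));
 *	}
 *
 *	device_initialize(&foo->dev);
 *	foo->dev.parent = parent;
 *	foo->dev.release = foo_dev_release;
 *	dev_set_name(&foo->dev, "foo%d", id);
 *	err = device_add(&foo->dev);
 *	if (err)
 *		put_device(&foo->dev);	// the release() callback frees foo
 *
 * device_register() is simply device_initialize() plus device_add(); either
 * way, the final put_device() is what ends up calling release().
 */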

static inline bool device_supports_offline(struct device *dev)
{
	return dev->bus && dev->bus->offline && dev->bus->online;
}

void lock_device_hotplug(void);
void unlock_device_hotplug(void);
int lock_device_hotplug_sysfs(void);
int device_offline(struct device *dev);
int device_online(struct device *dev);
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);

static inline int dev_num_vf(struct device *dev)
{
	if (dev->bus && dev->bus->num_vf)
		return dev->bus->num_vf(dev);
	return 0;
}

/*
 * Root device objects for grouping under /sys/devices
 */
struct device *__root_device_register(const char *name, struct module *owner);

/* This is a macro to avoid include problems with THIS_MODULE */
#define root_device_register(name) \
	__root_device_register(name, THIS_MODULE)

void root_device_unregister(struct device *root);

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}

/*
 * Manual binding of a device to a driver. See drivers/base/bus.c
 * for information on use.
 */
int __must_check device_bind_driver(struct device *dev);
void device_release_driver(struct device *dev);
int __must_check device_attach(struct device *dev);
int __must_check driver_attach(struct device_driver *drv);
void device_initial_probe(struct device *dev);
int __must_check device_reprobe(struct device *dev);

bool device_is_bound(struct device *dev);

/*
 * Easy functions for dynamically creating devices on the fly
 */
__printf(5, 6) struct device *
device_create(struct class *cls, struct device *parent, dev_t devt,
	      void *drvdata, const char *fmt, ...);
__printf(6, 7) struct device *
device_create_with_groups(struct class *cls, struct device *parent, dev_t devt,
			  void *drvdata, const struct attribute_group **groups,
			  const char *fmt, ...);
void device_destroy(struct class *cls, dev_t devt);
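
/*
 * device_create() is the usual way for a class-based subsystem to publish a
 * device node. Sketch; foo_class, foo_devt, priv and minor are illustrative:
 *
 *	struct device *dev;
 *
 *	dev = device_create(foo_class, parent, foo_devt, priv, "foo%d", minor);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	...
 *	device_destroy(foo_class, foo_devt);
 */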

int __must_check device_add_groups(struct device *dev,
				   const struct attribute_group **groups);
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups);

static inline int __must_check device_add_group(struct device *dev,
						const struct attribute_group *grp)
{
	const struct attribute_group *groups[] = { grp, NULL };

	return device_add_groups(dev, groups);
}

static inline void device_remove_group(struct device *dev,
				       const struct attribute_group *grp)
{
	const struct attribute_group *groups[] = { grp, NULL };

	return device_remove_groups(dev, groups);
}

int __must_check devm_device_add_groups(struct device *dev,
					const struct attribute_group **groups);
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups);
int __must_check devm_device_add_group(struct device *dev,
				       const struct attribute_group *grp);
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp);

/*
 * Platform "fixup" functions - allow the platform to have its say
 * about devices and actions that the general device layer doesn't
 * know about.
 */
/* Notify platform of device discovery */
extern int (*platform_notify)(struct device *dev);

extern int (*platform_notify_remove)(struct device *dev);


/*
 * get_device - atomically increment the reference count for the device.
 *
 */
struct device *get_device(struct device *dev);
void put_device(struct device *dev);
bool kill_device(struct device *dev);

#ifdef CONFIG_DEVTMPFS
int devtmpfs_mount(void);
#else
static inline int devtmpfs_mount(void) { return 0; }
#endif

/* drivers/base/power/shutdown.c */
void device_shutdown(void);

/* debugging and troubleshooting/diagnostic helpers. */
const char *dev_driver_string(const struct device *dev);

/* Device links interface. */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
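
/*
 * Sketch of creating a stateless device link with runtime-PM propagation;
 * consumer and supplier are whatever two devices the caller wants to relate:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer, supplier,
 *			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *	if (!link)
 *		return -EINVAL;
 *	...
 *	device_link_del(link);
 *
 * With DL_FLAG_STATELESS the caller owns the link and must delete it itself;
 * managed flags such as DL_FLAG_AUTOREMOVE_CONSUMER let the core drop the
 * link on unbind instead.
 */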

extern __printf(3, 4)
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
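
/*
 * dev_err_probe() keeps -EPROBE_DEFER quiet and records the deferral reason.
 * Sketch of the common probe() pattern, using devm_clk_get() as an example
 * resource:
 *
 *	clk = devm_clk_get(dev, NULL);
 *	if (IS_ERR(clk))
 *		return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
 *
 * It returns the error passed in, so it can be used directly in a return
 * statement.
 */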

/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
	MODULE_ALIAS("char-major-" __stringify(major) "-*")

#ifdef CONFIG_SYSFS_DEPRECATED
extern long sysfs_deprecated;
#else
#define sysfs_deprecated 0
#endif

#endif /* _DEVICE_H_ */