// SPDX-License-Identifier: GPL-2.0
/*
 * device.h - generic, centralized driver model
 *
 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2008-2009 Novell Inc.
 *
 * See Documentation/driver-api/driver-model/ for more information.
 */

#ifndef _DEVICE_H_
#define _DEVICE_H_

#include <linux/dev_printk.h>
#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/device/bus.h>
#include <linux/device/class.h>
#include <linux/device/driver.h>
#include <asm/device.h>

struct device;
struct device_private;
struct device_driver;
struct driver_private;
struct module;
struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;

/**
 * struct subsys_interface - interfaces to device functions
 * @name:       name of the device function
 * @subsys:     subsystem of the devices to attach to
 * @node:       the list of functions registered at the subsystem
 * @add_dev:    device hookup to device function handler
 * @remove_dev: device hookup to device function handler
 *
 * Simple interfaces attached to a subsystem. Multiple interfaces can
 * attach to a subsystem and its devices. Unlike drivers, they do not
 * exclusively claim or control devices. Interfaces usually represent
 * a specific functionality of a subsystem/class of devices.
 */
struct subsys_interface {
	const char *name;
	struct bus_type *subsys;
	struct list_head node;
	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
	void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};

int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);

int subsys_system_register(struct bus_type *subsys,
			   const struct attribute_group **groups);
int subsys_virtual_register(struct bus_type *subsys,
			    const struct attribute_group **groups);

/*
 * The type of device that "struct device" is embedded in. A class
 * or bus can contain devices of different types
 * like "partitions" and "disks", "mouse" and "event".
 * This identifies the device type and carries type-specific
 * information, equivalent to the kobj_type of a kobject.
 * If "name" is specified, the uevent will contain it in
 * the DEVTYPE variable.
 */
struct device_type {
	const char *name;
	const struct attribute_group **groups;
	int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
	char *(*devnode)(struct device *dev, umode_t *mode,
			 kuid_t *uid, kgid_t *gid);
	void (*release)(struct device *dev);

	const struct dev_pm_ops *pm;
};

/* interface for exporting device attributes */
struct device_attribute {
	struct attribute	attr;
	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
			char *buf);
	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);
};

struct dev_ext_attribute {
	struct device_attribute attr;
	void *var;
};

ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
			  char *buf);
ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
			char *buf);
ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			char *buf);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);

#define DEVICE_ATTR(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = \
		__ATTR_PREALLOC(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_ADMIN_RW(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
#define DEVICE_ATTR_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
#define DEVICE_ATTR_ADMIN_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
#define DEVICE_ATTR_WO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
#define DEVICE_INT_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name =		\
		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
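
/*
 * Illustrative sketch, not part of the original interface documentation:
 * a driver typically pairs one of the DEVICE_ATTR_*() helpers above with
 * <attr>_show()/<attr>_store() callbacks, for example (my_priv and the
 * "foo" attribute are hypothetical):
 *
 *	static ssize_t foo_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		struct my_priv *priv = dev_get_drvdata(dev);
 *
 *		return sysfs_emit(buf, "%d\n", priv->foo);
 *	}
 *	static DEVICE_ATTR_RO(foo);
 *
 * The resulting dev_attr_foo can then be placed in an attribute_group or
 * registered with device_create_file() declared below.
 */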

int device_create_file(struct device *device,
		       const struct device_attribute *entry);
void device_remove_file(struct device *dev,
			const struct device_attribute *attr);
bool device_remove_file_self(struct device *dev,
			     const struct device_attribute *attr);
int __must_check device_create_bin_file(struct device *dev,
					const struct bin_attribute *attr);
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr);

/* device resource management */
typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);

#ifdef CONFIG_DEBUG_DEVRES
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
			  int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
	__devres_alloc_node(release, size, gfp, nid, #release)
#else
void *devres_alloc_node(dr_release_t release, size_t size,
			gfp_t gfp, int nid) __malloc;
static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
}
#endif

void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data);
void devres_free(void *res);
void devres_add(struct device *dev, void *res);
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data);
void *devres_get(struct device *dev, void *new_res,
		 dr_match_t match, void *match_data);
void *devres_remove(struct device *dev, dr_release_t release,
		    dr_match_t match, void *match_data);
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data);
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data);
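
/*
 * Minimal sketch of the devres primitives above (my_res, my_hw_setup() and
 * my_hw_teardown() are hypothetical, not taken from this header):
 *
 *	static void my_res_release(struct device *dev, void *res)
 *	{
 *		struct my_res *r = res;
 *
 *		my_hw_teardown(r);
 *	}
 *
 *	struct my_res *res;
 *
 *	res = devres_alloc(my_res_release, sizeof(*res), GFP_KERNEL);
 *	if (!res)
 *		return -ENOMEM;
 *	my_hw_setup(res);
 *	devres_add(dev, res);
 *
 * The release callback runs automatically, in reverse order of addition,
 * when the driver detaches.
 */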

/* devres group */
void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
void devres_close_group(struct device *dev, void *id);
void devres_remove_group(struct device *dev, void *id);
int devres_release_group(struct device *dev, void *id);
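
/*
 * Sketch of devres group usage (illustrative only; my_init_substage() is a
 * hypothetical helper standing in for the managed allocations performed
 * inside the group): a group brackets a set of managed resources so that
 * just that set can be released early if a later step fails:
 *
 *	void *grp = devres_open_group(dev, NULL, GFP_KERNEL);
 *
 *	if (!grp)
 *		return -ENOMEM;
 *	err = my_init_substage(dev);
 *	if (err) {
 *		devres_release_group(dev, grp);
 *		return err;
 *	}
 *	devres_close_group(dev, grp);
 */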

/* managed devm_k.alloc/kfree for device drivers */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
void *devm_krealloc(struct device *dev, void *ptr, size_t size,
		    gfp_t gfp) __must_check;
__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
				     const char *fmt, va_list ap) __malloc;
__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
				    const char *fmt, ...) __malloc;
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
}
static inline void *devm_kmalloc_array(struct device *dev,
				       size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return devm_kmalloc(dev, bytes, flags);
}
static inline void *devm_kcalloc(struct device *dev,
				 size_t n, size_t size, gfp_t flags)
{
	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
void devm_kfree(struct device *dev, const void *p);
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
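
/*
 * Typical use of the devm_k*() helpers above (a sketch, not from this
 * header; my_probe() and my_priv are hypothetical): allocate driver state
 * in probe() and let the driver core free it when the device is unbound,
 * with no matching devm_kfree() needed in the remove path:
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct my_priv *priv;
 *
 *		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		dev_set_drvdata(dev, priv);
 *		return 0;
 *	}
 */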

unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order);
void devm_free_pages(struct device *dev, unsigned long addr);

void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res);
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res);

void __iomem *devm_of_iomap(struct device *dev,
			    struct device_node *node, int index,
			    resource_size_t *size);

/* allows adding/removing a custom action to/from the devres stack */
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
void devm_release_action(struct device *dev, void (*action)(void *), void *data);

static inline int devm_add_action_or_reset(struct device *dev,
					   void (*action)(void *), void *data)
{
	int ret;

	ret = devm_add_action(dev, action, data);
	if (ret)
		action(data);

	return ret;
}
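
/*
 * Sketch of devm_add_action_or_reset() usage (illustrative; my_hw_enable()
 * and my_hw_disable() are hypothetical helpers): wrap a non-devm operation
 * so that it is undone automatically on probe failure or driver unbind:
 *
 *	static void my_hw_disable_action(void *data)
 *	{
 *		my_hw_disable(data);
 *	}
 *
 *	ret = my_hw_enable(priv);
 *	if (ret)
 *		return ret;
 *	ret = devm_add_action_or_reset(dev, my_hw_disable_action, priv);
 *	if (ret)
 *		return ret;
 *
 * If registering the action fails, the action is run immediately, so the
 * error path needs no explicit my_hw_disable() call.
 */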

/**
 * devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @type: Type to allocate per-cpu memory for
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
#define devm_alloc_percpu(dev, type)      \
	((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
						      __alignof__(type)))

void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align);
void devm_free_percpu(struct device *dev, void __percpu *pdata);

struct device_dma_parameters {
	/*
	 * a low level driver may set these to teach IOMMU code about
	 * sg limitations.
	 */
	unsigned int max_segment_size;
	unsigned int min_align_mask;
	unsigned long segment_boundary_mask;
};

/**
 * enum device_link_state - Device link states.
 * @DL_STATE_NONE: The presence of the drivers is not being tracked.
 * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
 * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
 * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
 * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
 * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
 */
enum device_link_state {
	DL_STATE_NONE = -1,
	DL_STATE_DORMANT = 0,
	DL_STATE_AVAILABLE,
	DL_STATE_CONSUMER_PROBE,
	DL_STATE_ACTIVE,
	DL_STATE_SUPPLIER_UNBIND,
};

/*
 * Device link flags.
 *
 * STATELESS: The core will not remove this link automatically.
 * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
 * PM_RUNTIME: If set, the runtime PM framework will use this link.
 * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
 * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
 * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
 * MANAGED: The core tracks presence of supplier/consumer drivers (internal).
 * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
 * INFERRED: Inferred from data (e.g. firmware) and not from driver actions.
 */
#define DL_FLAG_STATELESS		BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER	BIT(1)
#define DL_FLAG_PM_RUNTIME		BIT(2)
#define DL_FLAG_RPM_ACTIVE		BIT(3)
#define DL_FLAG_AUTOREMOVE_SUPPLIER	BIT(4)
#define DL_FLAG_AUTOPROBE_CONSUMER	BIT(5)
#define DL_FLAG_MANAGED			BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY		BIT(7)
#define DL_FLAG_INFERRED		BIT(8)

/**
 * enum dl_dev_state - Device driver presence tracking information.
 * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
 * @DL_DEV_PROBING: A driver is probing.
 * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
 * @DL_DEV_UNBINDING: The driver is unbinding from the device.
 */
enum dl_dev_state {
	DL_DEV_NO_DRIVER = 0,
	DL_DEV_PROBING,
	DL_DEV_DRIVER_BOUND,
	DL_DEV_UNBINDING,
};

/**
 * struct dev_links_info - Device data related to device links.
 * @suppliers: List of links to supplier devices.
 * @consumers: List of links to consumer devices.
 * @defer_sync: Hook to global list of devices that have deferred sync_state.
 * @status: Driver status information.
 */
struct dev_links_info {
	struct list_head suppliers;
	struct list_head consumers;
	struct list_head defer_sync;
	enum dl_dev_state status;
};

/**
 * struct device - The basic device structure
 * @parent:	The device's "parent" device, the device to which it is attached.
 * 		In most cases, a parent device is some sort of bus or host
 * 		controller. If parent is NULL, the device is a top-level device,
 * 		which is not usually what you want.
 * @p:		Holds the private data of the driver core portions of the device.
 * 		See the comment of the struct device_private for detail.
 * @kobj:	A top-level, abstract class from which other classes are derived.
 * @init_name:	Initial name of the device.
 * @type:	The type of device.
 * 		This identifies the device type and carries type-specific
 * 		information.
 * @mutex:	Mutex to synchronize calls to its driver.
 * @lockdep_mutex: An optional debug lock that a subsystem can use as a
 * 		peer lock to gain localized lockdep coverage of the device_lock.
 * @bus:	Type of bus device is on.
 * @driver:	Which driver has allocated this
 * @platform_data: Platform data specific to the device.
 * 		Example: For devices on custom boards, as typical of embedded
 * 		and SOC based hardware, Linux often uses platform_data to point
 * 		to board-specific structures describing devices and how they
 * 		are wired.  That can include what ports are available, chip
 * 		variants, which GPIO pins act in what additional roles, and so
 * 		on.  This shrinks the "Board Support Packages" (BSPs) and
 * 		minimizes board-specific #ifdefs in drivers.
 * @driver_data: Private pointer for driver specific info.
 * @links:	Links to suppliers and consumers of this device.
 * @power:	For device power management.
 *		See Documentation/driver-api/pm/devices.rst for details.
 * @pm_domain:	Provide callbacks that are executed during system suspend,
 * 		hibernation, system resume and during runtime PM transitions
 * 		along with subsystem-level and driver-level callbacks.
 * @em_pd:	device's energy model performance domain
 * @pins:	For device pin management.
 *		See Documentation/driver-api/pinctl.rst for details.
 * @msi_list:	Hosts MSI descriptors
 * @msi_domain: The generic MSI domain this device is using.
 * @numa_node:	NUMA node this device is close to.
 * @dma_ops:    DMA mapping operations for this device.
 * @dma_mask:	Dma mask (if dma'ble device).
 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings, as not
 * 		all hardware supports 64-bit addresses for consistent
 * 		allocations of such descriptors.
 * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
 *		DMA limit than the device itself supports.
 * @dma_range_map: map for DMA memory ranges relative to that of RAM
 * @dma_parms:	A low level driver may set these to teach IOMMU code about
 * 		segment limitations.
 * @dma_pools:	Dma pools (if dma'ble device).
 * @dma_mem:	Internal for coherent mem override.
 * @cma_area:	Contiguous memory area for dma allocations
 * @archdata:	For arch-specific additions.
 * @of_node:	Associated device tree node.
 * @fwnode:	Associated device node supplied by platform firmware.
 * @devt:	For creating the sysfs "dev".
 * @id:		device instance
 * @devres_lock: Spinlock to protect the resource of the device.
 * @devres_head: The resources list of the device.
 * @knode_class: The node used to add the device to the class list.
 * @class:	The class of the device.
 * @groups:	Optional attribute groups.
 * @release:	Callback to free the device after all references have
 * 		gone away. This should be set by the allocator of the
 * 		device (i.e. the bus driver that discovered the device).
 * @iommu_group: IOMMU group the device belongs to.
 * @iommu:	Per device generic IOMMU runtime data
 *
 * @offline_disabled: If set, the device is permanently online.
 * @offline:	Set after successful invocation of bus type's .offline().
 * @of_node_reused: Set if the device-tree node is shared with an ancestor
 *              device.
 * @state_synced: The hardware state of this device has been synced to match
 *		  the software state of this device by calling the driver/bus
 *		  sync_state() callback.
 * @can_match:	The device has matched with a driver at least once or it is in
 *		a bus (like AMBA) which can't check for matching drivers until
 *		other devices probe successfully.
 * @dma_coherent: this particular device is dma coherent, even if the
 *		architecture supports non-coherent devices.
 * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
 *		streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
 *		and optionally (if the coherent mask is large enough) also
 *		for dma allocations.  This flag is managed by the dma ops
 *		instance from ->dma_supported.
 *
 * At the lowest level, every device in a Linux system is represented by an
 * instance of struct device. The device structure contains the information
 * that the device model core needs to model the system. Most subsystems,
 * however, track additional information about the devices they host. As a
 * result, it is rare for devices to be represented by bare device structures;
 * instead, that structure, like kobject structures, is usually embedded within
 * a higher-level representation of the device.
 */
struct device {
	struct kobject kobj;
	struct device		*parent;

	struct device_private	*p;

	const char		*init_name; /* initial name of the device */
	const struct device_type *type;

	struct bus_type	*bus;		/* type of bus device is on */
	struct device_driver *driver;	/* which driver has allocated this
					   device */
	void		*platform_data;	/* Platform specific data, device
					   core doesn't touch it */
	void		*driver_data;	/* Driver data, set and get with
					   dev_set_drvdata/dev_get_drvdata */
#ifdef CONFIG_PROVE_LOCKING
	struct mutex		lockdep_mutex;
#endif
	struct mutex		mutex;	/* mutex to synchronize calls to
					 * its driver.
					 */

	struct dev_links_info	links;
	struct dev_pm_info	power;
	struct dev_pm_domain	*pm_domain;

#ifdef CONFIG_ENERGY_MODEL
	struct em_perf_domain	*em_pd;
#endif

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct irq_domain	*msi_domain;
#endif
#ifdef CONFIG_PINCTRL
	struct dev_pin_info	*pins;
#endif
#ifdef CONFIG_GENERIC_MSI_IRQ
	struct list_head	msi_list;
#endif
#ifdef CONFIG_DMA_OPS
	const struct dma_map_ops *dma_ops;
#endif
	u64		*dma_mask;	/* dma mask (if dma'able device) */
	u64		coherent_dma_mask;/* Like dma_mask, but for
					     alloc_coherent mappings as
					     not all hardware supports
					     64 bit addresses for consistent
					     allocations of such descriptors. */
	u64		bus_dma_limit;	/* upstream dma constraint */
	const struct bus_dma_region *dma_range_map;

	struct device_dma_parameters *dma_parms;

	struct list_head	dma_pools;	/* dma pools (if dma'ble) */

#ifdef CONFIG_DMA_DECLARE_COHERENT
	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
					     override */
#endif
#ifdef CONFIG_DMA_CMA
	struct cma *cma_area;		/* contiguous memory area for dma
					   allocations */
#endif
	/* arch specific additions */
	struct dev_archdata	archdata;

	struct device_node	*of_node; /* associated device tree node */
	struct fwnode_handle	*fwnode; /* firmware device node */

#ifdef CONFIG_NUMA
	int		numa_node;	/* NUMA node this device is close to */
#endif
	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
	u32			id;	/* device instance */

	spinlock_t		devres_lock;
	struct list_head	devres_head;

	struct class		*class;
	const struct attribute_group **groups;	/* optional groups */

	void	(*release)(struct device *dev);
	struct iommu_group	*iommu_group;
	struct dev_iommu	*iommu;

	bool			offline_disabled:1;
	bool			offline:1;
	bool			of_node_reused:1;
	bool			state_synced:1;
	bool			can_match:1;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	bool			dma_coherent:1;
#endif
#ifdef CONFIG_DMA_OPS_BYPASS
	bool			dma_ops_bypass : 1;
#endif
};

/**
 * struct device_link - Device link representation.
 * @supplier: The device on the supplier end of the link.
 * @s_node: Hook to the supplier device's list of links to consumers.
 * @consumer: The device on the consumer end of the link.
 * @c_node: Hook to the consumer device's list of links to suppliers.
 * @link_dev: device used to expose link details in sysfs
 * @status: The state of the link (with respect to the presence of drivers).
 * @flags: Link flags.
 * @rpm_active: Whether or not the consumer device is runtime-PM-active.
 * @kref: Count repeated addition of the same link.
 * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
 * @supplier_preactivated: Supplier has been made active before consumer probe.
 */
struct device_link {
	struct device *supplier;
	struct list_head s_node;
	struct device *consumer;
	struct list_head c_node;
	struct device link_dev;
	enum device_link_state status;
	u32 flags;
	refcount_t rpm_active;
	struct kref kref;
#ifdef CONFIG_SRCU
	struct rcu_head rcu_head;
#endif
	bool supplier_preactivated; /* Owned by consumer probe. */
};

static inline struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}

/**
 * device_iommu_mapped - Returns true when the device DMA is translated
 *			 by an IOMMU
 * @dev: Device to perform the check on
 */
static inline bool device_iommu_mapped(struct device *dev)
{
	return (dev->iommu_group != NULL);
}

/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>

static inline const char *dev_name(const struct device *dev)
{
	/* Use the init name until the kobject becomes available */
	if (dev->init_name)
		return dev->init_name;

	return kobject_name(&dev->kobj);
}

/**
 * dev_bus_name - Return a device's bus/class name, if at all possible
 * @dev: struct device to get the bus/class name of
 *
 * Will return the name of the bus/class the device is attached to.  If it is
 * not attached to a bus/class, an empty string will be returned.
 */
static inline const char *dev_bus_name(const struct device *dev)
{
	return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
}

__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);

#ifdef CONFIG_NUMA
static inline int dev_to_node(struct device *dev)
{
	return dev->numa_node;
}
static inline void set_dev_node(struct device *dev, int node)
{
	dev->numa_node = node;
}
#else
static inline int dev_to_node(struct device *dev)
{
	return NUMA_NO_NODE;
}
static inline void set_dev_node(struct device *dev, int node)
{
}
#endif

static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	return dev->msi_domain;
#else
	return NULL;
#endif
}

static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	dev->msi_domain = d;
#endif
}

static inline void *dev_get_drvdata(const struct device *dev)
{
	return dev->driver_data;
}

static inline void dev_set_drvdata(struct device *dev, void *data)
{
	dev->driver_data = data;
}
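
/*
 * Sketch (not from this header): drvdata is the usual way back from a
 * struct device to the driver's private state in later callbacks, e.g. in
 * a sysfs show() or a remove() routine, after probe() has stored it:
 *
 *	struct my_priv *priv = dev_get_drvdata(dev);
 *
 * where my_priv is the hypothetical structure previously passed to
 * dev_set_drvdata().
 */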

static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
{
	return dev ? dev->power.subsys_data : NULL;
}

static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
{
	return dev->kobj.uevent_suppress;
}

static inline void dev_set_uevent_suppress(struct device *dev, int val)
{
	dev->kobj.uevent_suppress = val;
}

static inline int device_is_registered(struct device *dev)
{
	return dev->kobj.state_in_sysfs;
}

static inline void device_enable_async_suspend(struct device *dev)
{
	if (!dev->power.is_prepared)
		dev->power.async_suspend = true;
}

static inline void device_disable_async_suspend(struct device *dev)
{
	if (!dev->power.is_prepared)
		dev->power.async_suspend = false;
}

static inline bool device_async_suspend_enabled(struct device *dev)
{
	return !!dev->power.async_suspend;
}

static inline bool device_pm_not_required(struct device *dev)
{
	return dev->power.no_pm;
}

static inline void device_set_pm_not_required(struct device *dev)
{
	dev->power.no_pm = true;
}

static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
	dev->power.syscore = val;
#endif
}

static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
{
	dev->power.driver_flags = flags;
}

static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
{
	return !!(dev->power.driver_flags & flags);
}

static inline void device_lock(struct device *dev)
{
	mutex_lock(&dev->mutex);
}

static inline int device_lock_interruptible(struct device *dev)
{
	return mutex_lock_interruptible(&dev->mutex);
}

static inline int device_trylock(struct device *dev)
{
	return mutex_trylock(&dev->mutex);
}

static inline void device_unlock(struct device *dev)
{
	mutex_unlock(&dev->mutex);
}

static inline void device_lock_assert(struct device *dev)
{
	lockdep_assert_held(&dev->mutex);
}

static inline struct device_node *dev_of_node(struct device *dev)
{
	if (!IS_ENABLED(CONFIG_OF) || !dev)
		return NULL;
	return dev->of_node;
}

static inline bool dev_has_sync_state(struct device *dev)
{
	if (!dev)
		return false;
	if (dev->driver && dev->driver->sync_state)
		return true;
	if (dev->bus && dev->bus->sync_state)
		return true;
	return false;
}

/*
 * High level routines for use by the bus drivers
 */
int __must_check device_register(struct device *dev);
void device_unregister(struct device *dev);
void device_initialize(struct device *dev);
int __must_check device_add(struct device *dev);
void device_del(struct device *dev);
int device_for_each_child(struct device *dev, void *data,
			  int (*fn)(struct device *dev, void *data));
int device_for_each_child_reverse(struct device *dev, void *data,
				  int (*fn)(struct device *dev, void *data));
struct device *device_find_child(struct device *dev, void *data,
				 int (*match)(struct device *dev, void *data));
struct device *device_find_child_by_name(struct device *parent,
					 const char *name);
int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order);
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
			       kgid_t *gid, const char **tmp);
int device_is_dependent(struct device *dev, void *target);
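
/*
 * Sketch of the child-iteration helpers above (illustrative only;
 * my_count_child() is hypothetical): the callback is invoked for every
 * child, and a non-zero return value stops the walk and is propagated to
 * the caller:
 *
 *	static int my_count_child(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	device_for_each_child(parent, &count, my_count_child);
 */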

static inline bool device_supports_offline(struct device *dev)
{
	return dev->bus && dev->bus->offline && dev->bus->online;
}

void lock_device_hotplug(void);
void unlock_device_hotplug(void);
int lock_device_hotplug_sysfs(void);
int device_offline(struct device *dev);
int device_online(struct device *dev);
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);

static inline int dev_num_vf(struct device *dev)
{
	if (dev->bus && dev->bus->num_vf)
		return dev->bus->num_vf(dev);
	return 0;
}

/*
 * Root device objects for grouping under /sys/devices
 */
struct device *__root_device_register(const char *name, struct module *owner);

/* This is a macro to avoid include problems with THIS_MODULE */
#define root_device_register(name) \
	__root_device_register(name, THIS_MODULE)

void root_device_unregister(struct device *root);

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}

/*
 * Manual binding of a device to driver. See drivers/base/bus.c
 * for information on use.
 */
int __must_check device_bind_driver(struct device *dev);
void device_release_driver(struct device *dev);
int  __must_check device_attach(struct device *dev);
int __must_check driver_attach(struct device_driver *drv);
void device_initial_probe(struct device *dev);
int __must_check device_reprobe(struct device *dev);

bool device_is_bound(struct device *dev);

/*
 * Easy functions for dynamically creating devices on the fly
 */
__printf(5, 6) struct device *
device_create(struct class *cls, struct device *parent, dev_t devt,
	      void *drvdata, const char *fmt, ...);
__printf(6, 7) struct device *
device_create_with_groups(struct class *cls, struct device *parent, dev_t devt,
			  void *drvdata, const struct attribute_group **groups,
			  const char *fmt, ...);
void device_destroy(struct class *cls, dev_t devt);
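
/*
 * Sketch of device_create()/device_destroy() (illustrative; my_class,
 * my_devt, parent and priv are assumed to exist in the caller): create a
 * class device that devtmpfs will expose as /dev/my_dev0, then tear it
 * down again:
 *
 *	struct device *d;
 *
 *	d = device_create(my_class, parent, my_devt, priv, "my_dev%d", 0);
 *	if (IS_ERR(d))
 *		return PTR_ERR(d);
 *	...
 *	device_destroy(my_class, my_devt);
 */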

int __must_check device_add_groups(struct device *dev,
				   const struct attribute_group **groups);
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups);

static inline int __must_check device_add_group(struct device *dev,
					const struct attribute_group *grp)
{
	const struct attribute_group *groups[] = { grp, NULL };

	return device_add_groups(dev, groups);
}

static inline void device_remove_group(struct device *dev,
				       const struct attribute_group *grp)
{
	const struct attribute_group *groups[] = { grp, NULL };

	return device_remove_groups(dev, groups);
}

int __must_check devm_device_add_groups(struct device *dev,
					const struct attribute_group **groups);
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups);
int __must_check devm_device_add_group(struct device *dev,
				       const struct attribute_group *grp);
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp);

/*
 * Platform "fixup" functions - allow the platform to have their say
 * about devices and actions that the general device layer doesn't
 * know about.
 */
/* Notify platform of device discovery */
extern int (*platform_notify)(struct device *dev);

extern int (*platform_notify_remove)(struct device *dev);


/*
 * get_device - atomically increment the reference count for the device.
 *
 */
struct device *get_device(struct device *dev);
void put_device(struct device *dev);
bool kill_device(struct device *dev);

#ifdef CONFIG_DEVTMPFS
int devtmpfs_mount(void);
#else
static inline int devtmpfs_mount(void) { return 0; }
#endif

/* drivers/base/power/shutdown.c */
void device_shutdown(void);

/* debugging and troubleshooting/diagnostic helpers. */
const char *dev_driver_string(const struct device *dev);

/* Device links interface. */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
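
/*
 * Sketch of device link creation (illustrative; consumer and supplier are
 * assumed to be valid struct device pointers): make the consumer depend on
 * the supplier for probe ordering and runtime PM, and let the core drop
 * the link when the consumer unbinds:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer, supplier,
 *			       DL_FLAG_AUTOREMOVE_CONSUMER |
 *			       DL_FLAG_PM_RUNTIME);
 *	if (!link)
 *		return -EINVAL;
 */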

extern __printf(3, 4)
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
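
/*
 * Sketch of dev_err_probe() usage (illustrative): it prints the message for
 * real errors, records the reason for -EPROBE_DEFER instead of logging it,
 * and returns the error code so it can be used directly in probe():
 *
 *	irq = platform_get_irq(pdev, 0);
 *	if (irq < 0)
 *		return dev_err_probe(&pdev->dev, irq, "failed to get IRQ\n");
 */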

/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
	MODULE_ALIAS("char-major-" __stringify(major) "-*")

#ifdef CONFIG_SYSFS_DEPRECATED
extern long sysfs_deprecated;
#else
#define sysfs_deprecated 0
#endif

#endif /* _DEVICE_H_ */