// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */

#include <drm/drm_managed.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#include "drm_internal.h"

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device,
 * usually when userspace still has open files and other handles to its
 * resources.
 *
 * Release actions can be added with drmm_add_action(), and memory allocations
 * can be done directly with drmm_kmalloc() and the related functions.
 * Everything is released on the final drm_dev_put(), in reverse order of how
 * the release actions were added and the memory was allocated since driver
 * loading started with devm_drm_dev_alloc().
 *
 * Note that release actions and managed memory can also be added and removed
 * during the lifetime of the driver; all of these functions are safe against
 * concurrent use. It is nevertheless recommended to use managed resources
 * only for resources that change rarely, if ever, during the lifetime of the
 * &drm_device instance.
 */
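
/*
 * Usage sketch (illustrative only; my_priv, my_hw_fini and my_hw_disable are
 * hypothetical driver names): a driver based on devm_drm_dev_alloc() can hang
 * a teardown action off its drm_device like this:
 *
 *	static void my_hw_fini(struct drm_device *dev, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_hw_disable(priv);
 *	}
 *
 *	ret = drmm_add_action_or_reset(dev, my_hw_fini, priv);
 *	if (ret)
 *		return ret;
 *
 * On failure drmm_add_action_or_reset() calls the action immediately, so no
 * separate error path is needed; otherwise my_hw_fini() runs on the final
 * drm_dev_put().
 */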

struct drmres_node {
	struct list_head	entry;
	drmres_release_t	release;
	const char		*name;
	size_t			size;
};

struct drmres {
	struct drmres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
	 * alignment for struct drmres when allocated by kmalloc().
	 */
	u8 __aligned(ARCH_DMA_MINALIGN) data[];
};
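
/*
 * Layout note: each managed resource is a single allocation, with the
 * drmres_node bookkeeping header immediately followed by the payload in
 * data[]. drmm_kmalloc() hands out dr->data, so that payload address is what
 * drivers see and what drmm_kfree() later matches against.
 */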

static void free_dr(struct drmres *dr)
{
	kfree_const(dr->node.name);
	kfree(dr);
}

void drm_managed_release(struct drm_device *dev)
{
	struct drmres *dr, *tmp;

	drm_dbg_drmres(dev, "drmres release begin\n");
	list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
		drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
			       dr, dr->node.name, dr->node.size);

		if (dr->node.release)
			dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

		list_del(&dr->node.entry);
		free_dr(dr);
	}
	drm_dbg_drmres(dev, "drmres release end\n");
}

/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 */
static __always_inline struct drmres *alloc_dr(drmres_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct drmres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct drmres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	dr->node.size = size;

	return dr;
}

static void del_dr(struct drm_device *dev, struct drmres *dr)
{
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%zu bytes)\n",
		       dr, dr->node.name, dr->node.size);
}

static void add_dr(struct drm_device *dev, struct drmres *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%zu bytes)\n",
		       dr, dr->node.name, dr->node.size);
}

void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
	WARN_ON(dev->managed.final_kfree);
	WARN_ON(dev < (struct drm_device *) container);
	WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
	dev->managed.final_kfree = container;
}

int __drmm_add_action(struct drm_device *dev,
		      drmres_release_t action,
		      void *data, const char *name)
{
	struct drmres *dr;
	void **void_ptr;

	dr = alloc_dr(action, data ? sizeof(void *) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
		return -ENOMEM;
	}

	dr->node.name = kstrdup_const(name, GFP_KERNEL);
	if (data) {
		void_ptr = (void **)&dr->data;
		*void_ptr = data;
	}

	add_dr(dev, dr);

	return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

int __drmm_add_action_or_reset(struct drm_device *dev,
			       drmres_release_t action,
			       void *data, const char *name)
{
	int ret;

	ret = __drmm_add_action(dev, action, data, name);
	if (ret)
		action(dev, data);

	return ret;
}
EXPORT_SYMBOL(__drmm_add_action_or_reset);

/**
 * drmm_release_action - release a managed action from a &drm_device
 * @dev: DRM device
 * @action: function which would be called when @dev is released
 * @data: opaque pointer, passed to @action
 *
 * This function immediately calls the @action previously added with
 * drmm_add_action() and removes it from the list of cleanup actions for
 * @dev, which means it won't be called again in the final drm_dev_put().
 */
void drmm_release_action(struct drm_device *dev,
			 drmres_release_t action,
			 void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry_reverse(dr, &dev->managed.resources, node.entry) {
		if (dr->node.release == action) {
			if (!data || *(void **)dr->data == data) {
				dr_match = dr;
				del_dr(dev, dr_match);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	action(dev, data);

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_release_action);
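
/*
 * Usage sketch (illustrative, reusing the hypothetical my_hw_fini/priv names
 * from the sketch above): a driver that must tear down early, e.g. on
 * hotunplug, can fire the action ahead of the final drm_dev_put():
 *
 *	drmm_release_action(dev, my_hw_fini, priv);
 */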

/**
 * drmm_kmalloc - &drm_device managed kmalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kmalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put(). Memory can also be freed
 * before the final drm_dev_put() by calling drmm_kfree().
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	struct drmres *dr;

	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
		return NULL;
	}
	dr->node.name = kstrdup_const("kmalloc", gfp);

	add_dr(dev, dr);

	return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);
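
/*
 * Usage sketch (illustrative, hypothetical my_state type): the returned
 * buffer lives until the final drm_dev_put(), so no explicit kfree() is
 * needed on the success path:
 *
 *	struct my_state *state;
 *
 *	state = drmm_kmalloc(dev, sizeof(*state), GFP_KERNEL);
 *	if (!state)
 *		return -ENOMEM;
 */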

/**
 * drmm_kstrdup - &drm_device managed kstrdup()
 * @dev: DRM device
 * @s: 0-terminated string to be duplicated
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kstrdup(). The allocated memory is
 * automatically freed on the final drm_dev_put() and works exactly like a
 * memory allocation obtained by drmm_kmalloc().
 */
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = drmm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);
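
/*
 * Usage sketch (illustrative, hypothetical priv->label member): duplicate a
 * string that must stay valid for the lifetime of the drm_device:
 *
 *	priv->label = drmm_kstrdup(dev, name, GFP_KERNEL);
 *	if (!priv->label)
 *		return -ENOMEM;
 */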

/**
 * drmm_kfree - &drm_device managed kfree()
 * @dev: DRM device
 * @data: memory allocation to be freed
 *
 * This is a &drm_device managed version of kfree() which can be used to
 * release memory allocated through drmm_kmalloc() or any of its related
 * functions before the final drm_dev_put() of @dev.
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
	struct drmres *dr_match = NULL, *dr;
	unsigned long flags;

	if (!data)
		return;

	spin_lock_irqsave(&dev->managed.lock, flags);
	list_for_each_entry(dr, &dev->managed.resources, node.entry) {
		if (dr->data == data) {
			dr_match = dr;
			del_dr(dev, dr_match);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	if (WARN_ON(!dr_match))
		return;

	free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);
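
/*
 * Usage sketch (illustrative, continuing the drmm_kmalloc() sketch above):
 * free a managed allocation early, once it is no longer needed, instead of
 * waiting for the final drm_dev_put():
 *
 *	drmm_kfree(dev, state);
 */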

void __drmm_mutex_release(struct drm_device *dev, void *res)
{
	struct mutex *lock = res;

	mutex_destroy(lock);
}
EXPORT_SYMBOL(__drmm_mutex_release);
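
/*
 * __drmm_mutex_release() is the release action behind the drmm_mutex_init()
 * helper declared in drm_managed.h. Usage sketch (illustrative, hypothetical
 * priv->lock member):
 *
 *	ret = drmm_mutex_init(dev, &priv->lock);
 *	if (ret)
 *		return ret;
 */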