/*	$NetBSD: drm_vma_manager.h,v 1.3 2021/12/18 23:45:46 riastradh Exp $	*/

#ifndef __DRM_VMA_MANAGER_H__
#define __DRM_VMA_MANAGER_H__

/*
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_mm.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* We make up offsets for buffer objects so we can recognize them at
 * mmap time. pgoff in mmap is an unsigned long, so we need to make sure
 * that the faked up offset will fit
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 256)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

struct drm_file;

struct drm_vma_offset_file {
	struct rb_node vm_rb;
	struct drm_file *vm_tag;
	unsigned long vm_count;
};

struct drm_vma_offset_node {
	rwlock_t vm_lock;
	struct drm_mm_node vm_node;
	struct rb_root vm_files;
	bool readonly:1;
};

struct drm_vma_offset_manager {
	rwlock_t vm_lock;
	struct drm_mm vm_addr_space_mm;
};

void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size);
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
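
/*
 * Example (illustrative sketch, not part of this API): a driver would
 * typically set up one offset manager per device, covering the fake mmap
 * offset range defined by DRM_FILE_PAGE_OFFSET_START/SIZE above. The
 * "my_driver" structure and helpers below are hypothetical.
 *
 *	struct my_driver {
 *		struct drm_vma_offset_manager vma_manager;
 *	};
 *
 *	static void my_driver_init(struct my_driver *drv)
 *	{
 *		drm_vma_offset_manager_init(&drv->vma_manager,
 *					    DRM_FILE_PAGE_OFFSET_START,
 *					    DRM_FILE_PAGE_OFFSET_SIZE);
 *	}
 *
 *	static void my_driver_fini(struct my_driver *drv)
 *	{
 *		drm_vma_offset_manager_destroy(&drv->vma_manager);
 *	}
 */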

struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							   unsigned long start,
							   unsigned long pages);
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages);
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node);

int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag);
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag);
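
/*
 * Example (illustrative sketch): typical per-object lifecycle. Only the
 * drm_vma_* calls come from this header; "my_obj" and its helpers are
 * hypothetical.
 *
 *	struct my_obj {
 *		struct drm_vma_offset_node vma_node;
 *		unsigned long num_pages;
 *	};
 *
 *	static int my_obj_init(struct drm_vma_offset_manager *mgr,
 *			       struct my_obj *obj)
 *	{
 *		drm_vma_node_reset(&obj->vma_node);
 *		return drm_vma_offset_add(mgr, &obj->vma_node, obj->num_pages);
 *	}
 *
 *	static void my_obj_fini(struct drm_vma_offset_manager *mgr,
 *				struct my_obj *obj)
 *	{
 *		drm_vma_offset_remove(mgr, &obj->vma_node);
 *	}
 *
 * Each client that opens the object is granted access with
 * drm_vma_node_allow(&obj->vma_node, file) and loses it again with
 * drm_vma_node_revoke(&obj->vma_node, file) on close.
 */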

/**
 * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
 * @mgr: Manager object
 * @start: Start address (page-based, not byte-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
 * It only returns the exact object with the given start address.
 *
 * RETURNS:
 * Node at exact start address @start.
 */
static inline struct drm_vma_offset_node *
drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
				   unsigned long start,
				   unsigned long pages)
{
	struct drm_vma_offset_node *node;

	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	return (node && node->vm_node.start == start) ? node : NULL;
}

/**
 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
 * @mgr: Manager object
 *
 * Lock VMA manager for extended lookups. Only locked VMA function calls
 * are allowed while holding this lock. All other contexts are blocked from VMA
 * until the lock is released via drm_vma_offset_unlock_lookup().
 *
 * Use this if you need to take a reference to the objects returned by
 * drm_vma_offset_lookup_locked() before releasing this lock again.
 *
 * This lock must not be used for anything else than extended lookups. You must
 * not call any other VMA helpers while holding this lock.
 *
 * Note: You're in atomic-context while holding this lock!
 */
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{
	read_lock(&mgr->vm_lock);
}

/**
 * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
 * @mgr: Manager object
 *
 * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
 */
static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
{
	read_unlock(&mgr->vm_lock);
}
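
/*
 * Example (illustrative sketch): extended lookup that takes a reference on
 * the backing object before dropping the lookup lock. "struct my_obj" and
 * my_obj_get() are hypothetical; a real driver would use its own object
 * type and reference helpers.
 *
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *	if (node)
 *		my_obj_get(container_of(node, struct my_obj, vma_node));
 *	drm_vma_offset_unlock_lookup(mgr);
 */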

/**
 * drm_vma_node_reset() - Initialize or reset node object
 * @node: Node to initialize or reset
 *
 * Reset a node to its initial state. This must be called before using it with
 * any VMA offset manager.
 *
 * This must not be called on an already allocated node, or you will leak
 * memory.
 */
static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
{
	memset(node, 0, sizeof(*node));
	node->vm_files = RB_ROOT;
	rwlock_init(&node->vm_lock);
}

/**
 * drm_vma_node_start() - Return start address for page-based addressing
 * @node: Node to inspect
 *
 * Return the start address of the given node. This can be used as offset into
 * the linear VM space that is provided by the VMA offset manager. Note that
 * this can only be used for page-based addressing. If you need a proper offset
 * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
 * drm_vma_node_offset_addr() helper instead.
 *
 * RETURNS:
 * Start address of @node for page-based addressing. 0 if the node does not
 * have an offset allocated.
 */
static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node)
{
	return node->vm_node.start;
}

/**
 * drm_vma_node_size() - Return size (page-based)
 * @node: Node to inspect
 *
 * Return the size as number of pages for the given node. This is the same size
 * that was passed to drm_vma_offset_add(). If no offset is allocated for the
 * node, this is 0.
 *
 * RETURNS:
 * Size of @node as number of pages. 0 if the node does not have an offset
 * allocated.
 */
static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
{
	return node->vm_node.size;
}

/**
 * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
 * @node: Linked offset node
 *
 * Same as drm_vma_node_start() but returns the address as a valid offset that
 * can be used for user-space mappings during mmap().
 * This must not be called on unlinked nodes.
 *
 * RETURNS:
 * Offset of @node for byte-based addressing. 0 if the node does not have an
 * object allocated.
 */
static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
{
	return ((__u64)node->vm_node.start) << PAGE_SHIFT;
}
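
/*
 * Example (illustrative sketch): this is the value a driver typically hands
 * back to user-space so the object can be mapped through the DRM device
 * node. "args" stands for a hypothetical ioctl argument struct with a
 * 64-bit "offset" field.
 *
 *	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 * User-space then maps the object with
 * mmap(NULL, size, prot, MAP_SHARED, drm_fd, args->offset).
 */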

/**
 * drm_vma_node_unmap() - Unmap offset node
 * @node: Offset node
 * @file_mapping: Address space to unmap @node from
 *
 * Unmap all userspace mappings for a given offset node. The mappings must be
 * associated with the @file_mapping address-space. If no offset exists
 * nothing is done.
 *
 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
 * is not called on this node concurrently.
 */
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
				      struct address_space *file_mapping)
{
	if (drm_mm_node_allocated(&node->vm_node))
		unmap_mapping_range(file_mapping,
				    drm_vma_node_offset_addr(node),
				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
}
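
/*
 * Example (illustrative sketch): when an object's backing storage is moved
 * or purged, a driver can invalidate all existing user-space mappings so the
 * next access faults in the new location. "obj" and "mapping" are
 * placeholders for the driver's object and the address_space its fake mmap
 * offsets live in.
 *
 *	drm_vma_node_unmap(&obj->vma_node, mapping);
 */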

/**
 * drm_vma_node_verify_access() - Access verification helper for TTM
 * @node: Offset node
 * @tag: Tag of file to check
 *
 * This checks whether @tag is granted access to @node. It is the same as
 * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
 * verify_access() callbacks.
 *
 * RETURNS:
 * 0 if access is granted, -EACCES otherwise.
 */
static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
					     struct drm_file *tag)
{
	return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
}
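
/*
 * Example (illustrative sketch): drop-in use from a TTM verify_access()
 * callback. The callback and the container_of() target are hypothetical,
 * and the exact TTM callback signature depends on the TTM version in use.
 *
 *	static int my_bo_verify_access(struct ttm_buffer_object *bo,
 *				       struct file *filp)
 *	{
 *		struct my_obj *obj = container_of(bo, struct my_obj, ttm_bo);
 *
 *		return drm_vma_node_verify_access(&obj->vma_node,
 *						  filp->private_data);
 *	}
 */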

#endif /* __DRM_VMA_MANAGER_H__ */