#ifndef __DRM_VMA_MANAGER_H__
#define __DRM_VMA_MANAGER_H__

/*
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_mm.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* We make up offsets for buffer objects so we can recognize them at
 * mmap time. pgoff in mmap is an unsigned long, so we need to make sure
 * that the faked-up offset will fit.
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 256)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

struct drm_file;

struct drm_vma_offset_file {
        struct rb_node vm_rb;
        struct drm_file *vm_tag;
        unsigned long vm_count;
};

struct drm_vma_offset_node {
        rwlock_t vm_lock;
        struct drm_mm_node vm_node;
        struct rb_root vm_files;
        void *driver_private;
};

struct drm_vma_offset_manager {
        rwlock_t vm_lock;
        struct drm_mm vm_addr_space_mm;
};

void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
                                 unsigned long page_offset, unsigned long size);
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
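
/*
 * Example (illustrative sketch only): a driver typically embeds one offset
 * manager per device and initializes it with the DRM_FILE_PAGE_OFFSET_*
 * constants defined above. The foo_device type below is hypothetical and
 * not part of this API.
 *
 *     struct foo_device {
 *             struct drm_vma_offset_manager vma_manager;
 *     };
 *
 *     static void foo_device_init(struct foo_device *fdev)
 *     {
 *             drm_vma_offset_manager_init(&fdev->vma_manager,
 *                                         DRM_FILE_PAGE_OFFSET_START,
 *                                         DRM_FILE_PAGE_OFFSET_SIZE);
 *     }
 *
 *     static void foo_device_fini(struct foo_device *fdev)
 *     {
 *             drm_vma_offset_manager_destroy(&fdev->vma_manager);
 *     }
 */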

struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
                                                          unsigned long start,
                                                          unsigned long pages);
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
                       struct drm_vma_offset_node *node, unsigned long pages);
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
                           struct drm_vma_offset_node *node);
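
/*
 * Example (illustrative sketch only): allocating and releasing a fake mmap
 * offset for a buffer object. The foo_bo type and its size_in_pages field
 * are hypothetical; only the drm_vma_* calls are part of this API.
 *
 *     struct foo_bo {
 *             struct drm_vma_offset_node vma_node;
 *             unsigned long size_in_pages;
 *     };
 *
 *     static int foo_bo_create_mmap_offset(struct foo_device *fdev,
 *                                          struct foo_bo *bo)
 *     {
 *             drm_vma_node_reset(&bo->vma_node);
 *             return drm_vma_offset_add(&fdev->vma_manager, &bo->vma_node,
 *                                       bo->size_in_pages);
 *     }
 *
 *     static void foo_bo_free_mmap_offset(struct foo_device *fdev,
 *                                         struct foo_bo *bo)
 *     {
 *             drm_vma_offset_remove(&fdev->vma_manager, &bo->vma_node);
 *     }
 */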

int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag);
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
                         struct drm_file *tag);
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
                             struct drm_file *tag);
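
/*
 * Example (illustrative sketch only): granting and revoking per-file mmap
 * access when a handle to the object is opened or closed. The foo_bo_open
 * and foo_bo_close hooks are hypothetical driver callbacks.
 *
 *     static int foo_bo_open(struct foo_bo *bo, struct drm_file *file)
 *     {
 *             return drm_vma_node_allow(&bo->vma_node, file);
 *     }
 *
 *     static void foo_bo_close(struct foo_bo *bo, struct drm_file *file)
 *     {
 *             drm_vma_node_revoke(&bo->vma_node, file);
 *     }
 */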

/**
 * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
 * @mgr: Manager object
 * @start: Start address (page-based, not byte-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup_locked() but does not allow any offset into
 * the node. It only returns the exact object with the given start address.
 *
 * RETURNS:
 * Node at exact start address @start.
 */
static inline struct drm_vma_offset_node *
drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
                                   unsigned long start,
                                   unsigned long pages)
{
        struct drm_vma_offset_node *node;

        node = drm_vma_offset_lookup_locked(mgr, start, pages);
        return (node && node->vm_node.start == start) ? node : NULL;
}

/**
 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
 * @mgr: Manager object
 *
 * Lock VMA manager for extended lookups. Only locked VMA function calls
 * are allowed while holding this lock. All other contexts are blocked from VMA
 * until the lock is released via drm_vma_offset_unlock_lookup().
 *
 * Use this if you need to take a reference to the objects returned by
 * drm_vma_offset_lookup_locked() before releasing this lock again.
 *
 * This lock must not be used for anything else than extended lookups. You must
 * not call any other VMA helpers while holding this lock.
 *
 * Note: You're in atomic-context while holding this lock!
 */
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{
        read_lock(&mgr->vm_lock);
}

/**
 * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
 * @mgr: Manager object
 *
 * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
 */
static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
{
        read_unlock(&mgr->vm_lock);
}
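
/*
 * Example (illustrative sketch only): resolving a fake mmap offset back to
 * its object while taking a reference under the lookup lock, e.g. from a
 * driver's mmap() path. foo_bo is the hypothetical object from the examples
 * above and is assumed to embed a struct kref named refcount; vma_pages()
 * comes from <linux/mm.h>.
 *
 *     static struct foo_bo *foo_bo_lookup(struct foo_device *fdev,
 *                                         struct vm_area_struct *vma)
 *     {
 *             struct drm_vma_offset_node *node;
 *             struct foo_bo *bo = NULL;
 *
 *             drm_vma_offset_lock_lookup(&fdev->vma_manager);
 *             node = drm_vma_offset_exact_lookup_locked(&fdev->vma_manager,
 *                                                       vma->vm_pgoff,
 *                                                       vma_pages(vma));
 *             if (node) {
 *                     bo = container_of(node, struct foo_bo, vma_node);
 *                     if (!kref_get_unless_zero(&bo->refcount))
 *                             bo = NULL;
 *             }
 *             drm_vma_offset_unlock_lookup(&fdev->vma_manager);
 *
 *             return bo;
 *     }
 */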

/**
 * drm_vma_node_reset() - Initialize or reset node object
 * @node: Node to initialize or reset
 *
 * Reset a node to its initial state. This must be called before using it with
 * any VMA offset manager.
 *
 * This must not be called on an already allocated node, or you will leak
 * memory.
 */
static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
{
        memset(node, 0, sizeof(*node));
        node->vm_files = RB_ROOT;
        rwlock_init(&node->vm_lock);
}

/**
 * drm_vma_node_start() - Return start address for page-based addressing
 * @node: Node to inspect
 *
 * Return the start address of the given node. This can be used as offset into
 * the linear VM space that is provided by the VMA offset manager. Note that
 * this can only be used for page-based addressing. If you need a proper offset
 * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
 * drm_vma_node_offset_addr() helper instead.
 *
 * RETURNS:
 * Start address of @node for page-based addressing. 0 if the node does not
 * have an offset allocated.
 */
static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node)
{
        return node->vm_node.start;
}

/**
 * drm_vma_node_size() - Return size (page-based)
 * @node: Node to inspect
 *
 * Return the size as number of pages for the given node. This is the same size
 * that was passed to drm_vma_offset_add(). If no offset is allocated for the
 * node, this is 0.
 *
 * RETURNS:
 * Size of @node as number of pages. 0 if the node does not have an offset
 * allocated.
 */
static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
{
        return node->vm_node.size;
}

/**
 * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
 * @node: Linked offset node
 *
 * Same as drm_vma_node_start() but returns the address as a valid offset that
 * can be used for user-space mappings during mmap().
 * This must not be called on unlinked nodes.
 *
 * RETURNS:
 * Offset of @node for byte-based addressing. 0 if the node does not have an
 * object allocated.
 */
static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
{
        return ((__u64)node->vm_node.start) << PAGE_SHIFT;
}
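
/*
 * Example (illustrative sketch only): reporting the fake offset to user-space
 * so it can be passed to mmap() on the DRM device file. The ioctl argument
 * struct foo_mmap_args and its fields are hypothetical.
 *
 *     struct foo_mmap_args {
 *             __u32 handle;
 *             __u64 offset;
 *     };
 *
 *     static int foo_bo_mmap_offset_ioctl(struct foo_bo *bo,
 *                                         struct foo_mmap_args *args)
 *     {
 *             args->offset = drm_vma_node_offset_addr(&bo->vma_node);
 *             return 0;
 *     }
 */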

/**
 * drm_vma_node_unmap() - Unmap offset node
 * @node: Offset node
 * @file_mapping: Address space to unmap @node from
 *
 * Unmap all userspace mappings for a given offset node. The mappings must be
 * associated with the @file_mapping address-space. If no offset exists
 * nothing is done.
 *
 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
 * is not called on this node concurrently.
 */
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
                                      struct address_space *file_mapping)
{
        if (drm_mm_node_allocated(&node->vm_node))
                unmap_mapping_range(file_mapping,
                                    drm_vma_node_offset_addr(node),
                                    drm_vma_node_size(node) << PAGE_SHIFT, 1);
}
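
/*
 * Example (illustrative sketch only): zapping all user-space mappings of an
 * object before its backing storage is moved or purged, so the next access
 * faults in the new location. Which address_space the object was mapped
 * through is driver-specific; the mapping pointer below is assumed to be
 * remembered by the hypothetical driver at mmap time.
 *
 *     static void foo_bo_purge_mappings(struct foo_bo *bo,
 *                                       struct address_space *mapping)
 *     {
 *             drm_vma_node_unmap(&bo->vma_node, mapping);
 *     }
 */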

/**
 * drm_vma_node_verify_access() - Access verification helper for TTM
 * @node: Offset node
 * @tag: Tag of file to check
 *
 * This checks whether @tag is granted access to @node. It is the same as
 * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
 * verify_access() callbacks.
 *
 * RETURNS:
 * 0 if access is granted, -EACCES otherwise.
 */
static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
                                             struct drm_file *tag)
{
        return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
}

#endif /* __DRM_VMA_MANAGER_H__ */