xref: /linux/drivers/gpu/drm/msm/msm_gem_vma.c (revision 2ee4b5d2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2016 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include "msm_drv.h"
8 #include "msm_gem.h"
9 #include "msm_mmu.h"
10 
11 static void
12 msm_gem_address_space_destroy(struct kref *kref)
13 {
14 	struct msm_gem_address_space *aspace = container_of(kref,
15 			struct msm_gem_address_space, kref);
16 
17 	drm_mm_takedown(&aspace->mm);
18 	if (aspace->mmu)
19 		aspace->mmu->funcs->destroy(aspace->mmu);
20 	put_pid(aspace->pid);
21 	kfree(aspace);
22 }
23 
24 
25 void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
26 {
27 	if (aspace)
28 		kref_put(&aspace->kref, msm_gem_address_space_destroy);
29 }
30 
31 struct msm_gem_address_space *
32 msm_gem_address_space_get(struct msm_gem_address_space *aspace)
33 {
34 	if (!IS_ERR_OR_NULL(aspace))
35 		kref_get(&aspace->kref);
36 
37 	return aspace;
38 }
39 
40 bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
41 {
42 	return !!vma->inuse;
43 }
44 
45 /* Actually unmap memory for the vma */
46 void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
47 		struct msm_gem_vma *vma)
48 {
49 	unsigned size = vma->node.size;
50 
51 	/* Print a message if we try to purge a vma in use */
52 	if (GEM_WARN_ON(msm_gem_vma_inuse(vma)))
53 		return;
54 
55 	/* Don't do anything if the memory isn't mapped */
56 	if (!vma->mapped)
57 		return;
58 
59 	if (aspace->mmu)
60 		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
61 
62 	vma->mapped = false;
63 }
64 
65 /* Remove reference counts for the mapping */
66 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
67 		struct msm_gem_vma *vma)
68 {
69 	if (!GEM_WARN_ON(!vma->iova))
70 		vma->inuse--;
71 }
72 
73 int
74 msm_gem_map_vma(struct msm_gem_address_space *aspace,
75 		struct msm_gem_vma *vma, int prot,
76 		struct sg_table *sgt, int size)
77 {
78 	int ret = 0;
79 
80 	if (GEM_WARN_ON(!vma->iova))
81 		return -EINVAL;
82 
83 	/* Increase the usage counter */
84 	vma->inuse++;
85 
86 	if (vma->mapped)
87 		return 0;
88 
89 	vma->mapped = true;
90 
91 	if (aspace && aspace->mmu)
92 		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
93 				size, prot);
94 
95 	if (ret) {
96 		vma->mapped = false;
97 		vma->inuse--;
98 	}
99 
100 	return ret;
101 }
102 
/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	/* Refuse to tear down a vma that is still in use or still mapped */
	if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped))
		return;

	/* Give the iova range back to the allocator under the aspace lock */
	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	/* Drop the aspace reference taken in msm_gem_init_vma() */
	msm_gem_address_space_put(aspace);
}
119 
/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end)
{
	int ret;

	/* A non-zero iova means this vma has already been initialized */
	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

	/* Carve a PAGE_SIZE-aligned node out of [range_start, range_end) */
	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
					  size, PAGE_SIZE, 0,
					  range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start;
	vma->mapped = false;

	/* The vma keeps the address space alive until it is closed */
	kref_get(&aspace->kref);

	return 0;
}
146 
/*
 * Create an address space spanning [va_start, va_start + size) backed
 * by the given MMU (which may be NULL - other paths here check
 * aspace->mmu before using it).  Returns the new aspace with one
 * reference held, or an ERR_PTR on failure.  Ownership of @mmu
 * transfers to the aspace; it is destroyed in the kref release.
 */
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	/* Pass through an ERR_PTR from the mmu constructor unchanged */
	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	struct msm_gem_address_space *aspace;

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;

	/* Range allocator that hands out iovas within this space */
	drm_mm_init(&aspace->mm, va_start, size);

	kref_init(&aspace->kref);

	return aspace;
}
170