/* $OpenBSD: drm_gem_dma_helper.c,v 1.4 2024/11/10 06:51:59 jsg Exp $ */
/* $NetBSD: drm_gem_dma_helper.c,v 1.9 2019/11/05 23:29:28 jmcneill Exp $ */
/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <linux/iosys-map.h>

#include <drm/drm_device.h>
#include <drm/drm_gem_dma_helper.h>

#include <uvm/uvm_extern.h>

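/*
 * Default GEM object operations for DMA-backed objects.  mmap is not
 * wired up here yet (see the commented-out entry below); mappings are
 * instead expected to go through the uvm fault path, see
 * drm_gem_dma_fault() below.
 */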
static const struct drm_gem_object_funcs drm_gem_dma_default_funcs = {
	.free = drm_gem_dma_free_object,
	.get_sg_table = drm_gem_dma_get_sg_table,
	.vmap = drm_gem_dma_vmap,
	// .mmap = drm_gem_dma_mmap,
};

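/*
 * Common allocation path.  Allocates a wired, physically contiguous
 * (single-segment) DMA buffer of the given size, maps it uncached into
 * kernel virtual address space, and wraps it in a GEM object.  The sgt
 * argument is meant for importing a PRIME scatter/gather table, but
 * that path is still stubbed out and fails with ENOMEM.
 */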
static struct drm_gem_dma_object *
drm_gem_dma_create_internal(struct drm_device *ddev, size_t size,
    struct sg_table *sgt)
{
	struct drm_gem_dma_object *obj;
	int error, nsegs;

	obj = malloc(sizeof(*obj), M_DRM, M_WAITOK | M_ZERO);
	obj->dmat = ddev->dmat;
	obj->dmasize = size;
	obj->base.funcs = &drm_gem_dma_default_funcs;

	if (sgt) {
		/* sg_table import is not implemented yet. */
		STUB();
#ifdef notyet
		error = -drm_prime_sg_to_bus_dmamem(obj->dmat, obj->dmasegs, 1,
		    &nsegs, sgt);
#endif
		error = -ENOMEM;
	} else {
		error = bus_dmamem_alloc(obj->dmat, obj->dmasize,
		    PAGE_SIZE, 0, obj->dmasegs, 1, &nsegs,
		    BUS_DMA_WAITOK);
	}
	if (error)
		goto failed;
	error = bus_dmamem_map(obj->dmat, obj->dmasegs, nsegs,
	    obj->dmasize, &obj->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_NOCACHE);
	if (error)
		goto free;
	error = bus_dmamap_create(obj->dmat, obj->dmasize, 1,
	    obj->dmasize, 0, BUS_DMA_WAITOK, &obj->dmamap);
	if (error)
		goto unmap;
	error = bus_dmamap_load(obj->dmat, obj->dmamap, obj->vaddr,
	    obj->dmasize, NULL, BUS_DMA_WAITOK);
	if (error)
		goto destroy;

	/* Zero the buffer (unconditionally, until import is supported). */
#ifdef notyet
	if (!sgt)
#endif
		memset(obj->vaddr, 0, obj->dmasize);

	error = drm_gem_object_init(ddev, &obj->base, size);
	if (error)
		goto unload;

	obj->dma_addr = obj->dmamap->dm_segs[0].ds_addr;
	return obj;

	/* Unwind in the reverse order of setup. */
unload:
	bus_dmamap_unload(obj->dmat, obj->dmamap);
destroy:
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
unmap:
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
free:
#ifdef notyet
	if (obj->sgt)
		drm_prime_sg_free(obj->sgt);
	else
#endif
		bus_dmamem_free(obj->dmat, obj->dmasegs, nsegs);
failed:
	free(obj, M_DRM, sizeof(*obj));

	return NULL;
}

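/*
 * Public entry point for drivers: allocate a DMA-backed GEM object
 * with no scatter/gather import.
 */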
struct drm_gem_dma_object *
drm_gem_dma_create(struct drm_device *ddev, size_t size)
{

	return drm_gem_dma_create_internal(ddev, size, NULL);
}

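/*
 * Release the DMA resources backing an object, in the reverse order
 * of their setup in drm_gem_dma_create_internal(), then free the
 * object itself.
 */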
static void
drm_gem_dma_obj_free(struct drm_gem_dma_object *obj)
{

	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	bus_dmamem_unmap(obj->dmat, obj->vaddr, obj->dmasize);
#ifdef notyet
	if (obj->sgt)
		drm_prime_sg_free(obj->sgt);
	else
#endif
		bus_dmamem_free(obj->dmat, obj->dmasegs, 1);
	free(obj, M_DRM, sizeof(*obj));
}

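/*
 * GEM .free callback: drop the mmap offset and GEM state before
 * releasing the DMA backing.
 */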
void
drm_gem_dma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_dma_object *obj = to_drm_gem_dma_obj(gem_obj);

	drm_gem_free_mmap_offset(gem_obj);
	drm_gem_object_release(gem_obj);
	drm_gem_dma_obj_free(obj);
}

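/*
 * Back a dumb-buffer request of args->size bytes with a DMA object
 * and return a handle to it through args->handle.
 */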
int
drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
    struct drm_device *ddev, struct drm_mode_create_dumb *args)
{
	struct drm_gem_dma_object *obj;
	uint32_t handle;
	int error;

	args->handle = 0;

	obj = drm_gem_dma_create(ddev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	error = drm_gem_handle_create(file_priv, &obj->base, &handle);
	drm_gem_object_put(&obj->base);
	if (error) {
		drm_gem_dma_obj_free(obj);
		return error;
	}

	args->handle = handle;

	return 0;
}

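/*
 * DRM_IOCTL_MODE_CREATE_DUMB helper: derive the pitch from the width
 * and bytes per pixel (bpp rounded up to whole bytes), round the total
 * size up to a page, then allocate.
 */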
int
drm_gem_dma_dumb_create(struct drm_file *file_priv, struct drm_device *ddev,
    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup(args->size, PAGE_SIZE);

	return drm_gem_dma_dumb_create_internal(file_priv, ddev, args);
}

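/*
 * uvm fault handler for mmap'd objects: translate the fault offset
 * within the buffer to a physical address and enter it into the
 * faulting map, uncached.  Returns VM_PAGER_REFAULT when pmap_enter()
 * fails, so the fault is retried once memory is available again.
 */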
int
drm_gem_dma_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
    off_t offset, vaddr_t vaddr, vm_page_t *pps, int npages, int centeridx,
    vm_prot_t access_type, int flags)
{
	struct drm_gem_dma_object *obj = to_drm_gem_dma_obj(gem_obj);
	struct uvm_object *uobj = &obj->base.uobj;
	paddr_t paddr;
	int lcv, retval;
	vm_prot_t mapprot;

	offset -= drm_vma_node_offset_addr(&obj->base.vma_node);
	mapprot = ufi->entry->protection;

	retval = 0;
	for (lcv = 0; lcv < npages; lcv++, offset += PAGE_SIZE,
	    vaddr += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

		paddr = bus_dmamem_mmap(obj->dmat, obj->dmasegs, 1,
		    offset, access_type, BUS_DMA_NOCACHE);
		if (paddr == -1) {
			retval = VM_PAGER_BAD;
			break;
		}

		if (pmap_enter(ufi->orig_map->pmap, vaddr, paddr,
		    mapprot, PMAP_CANFAIL | mapprot) != 0) {
			pmap_update(ufi->orig_map->pmap);
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj);
			uvm_wait("drm_gem_dma_fault");
			return VM_PAGER_REFAULT;
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);

	return retval;
}

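/*
 * PRIME export is not implemented yet; the eventual conversion sits
 * in the #ifdef notyet block below.
 */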
struct sg_table *
drm_gem_dma_get_sg_table(struct drm_gem_object *gem_obj)
{
	return NULL;
#ifdef notyet
	struct drm_gem_dma_object *obj = to_drm_gem_dma_obj(gem_obj);

	return drm_prime_bus_dmamem_to_sg(obj->dmat, obj->dmasegs, 1);
#endif
}

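/*
 * PRIME import is likewise unimplemented pending sg_table support.
 */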
struct drm_gem_object *
drm_gem_dma_prime_import_sg_table(struct drm_device *ddev,
    struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	return NULL;
#ifdef notyet
	size_t size = drm_prime_sg_size(sgt);
	struct drm_gem_dma_object *obj;

	obj = drm_gem_dma_create_internal(ddev, size, sgt);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	return &obj->base;
#endif
}

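/*
 * GEM .vmap callback: the buffer already has a permanent kernel
 * mapping, so just hand that out.
 */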
int
drm_gem_dma_vmap(struct drm_gem_object *gem_obj, struct iosys_map *map)
{
	struct drm_gem_dma_object *obj = to_drm_gem_dma_obj(gem_obj);

	iosys_map_set_vaddr(map, obj->vaddr);

	return 0;
}