/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvidia-drm-conftest.h"

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-ioctl.h"

#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif

#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif

#include <linux/io.h>
#if defined(NV_BSD)
#include <vm/vm_pageout.h>
#endif

#include "nv-mm.h"

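/*
 * Free a GEM object backed by NvKmsKapiMemory: tear down the CPU
 * mapping (if one was created), release the page list obtained from
 * getMemoryPages(), free the NvKmsKapiMemory handle, and finally the
 * wrapper object itself.
 */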
static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_device *nv_dev = nv_gem->nv_dev;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        to_nv_nvkms_memory(nv_gem);

    if (nv_nvkms_memory->physically_mapped) {
        if (nv_nvkms_memory->pWriteCombinedIORemapAddress != NULL) {
            iounmap(nv_nvkms_memory->pWriteCombinedIORemapAddress);
        }

        nvKms->unmapMemory(nv_dev->pDevice,
                           nv_nvkms_memory->base.pMemory,
                           NVKMS_KAPI_MAPPING_TYPE_USER,
                           nv_nvkms_memory->pPhysicalAddress);
    }

    if (nv_nvkms_memory->pages_count != 0) {
        nvKms->freeMemoryPages((NvU64 *)nv_nvkms_memory->pages);
    }

    /* Free NvKmsKapiMemory handle associated with this gem object */

    nvKms->freeMemory(nv_dev->pDevice, nv_nvkms_memory->base.pMemory);

    nv_drm_free(nv_nvkms_memory);
}

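/*
 * mmap callback: defer to drm_gem_mmap_obj(). drm_vma_node_size()
 * returns the size in pages, hence the shift by PAGE_SHIFT.
 */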
static int __nv_drm_gem_nvkms_mmap(struct nv_drm_gem_object *nv_gem,
                                   struct vm_area_struct *vma)
{
    return drm_gem_mmap_obj(&nv_gem->base,
                drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);
}

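/*
 * Fault handler for mappings set up via __nv_drm_gem_nvkms_mmap(). The
 * faulting PFN is derived either from the physical address returned by
 * nvKms->mapMemory() (when no page list is available) or from the page
 * array returned by getMemoryPages(); one page is inserted per fault.
 */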
static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
    struct nv_drm_gem_object *nv_gem,
    struct vm_area_struct *vma,
    struct vm_fault *vmf)
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        to_nv_nvkms_memory(nv_gem);
    unsigned long address = nv_page_fault_va(vmf);
    struct drm_gem_object *gem = vma->vm_private_data;
    unsigned long page_offset, pfn;
    vm_fault_t ret;

    page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);

    if (nv_nvkms_memory->pages_count == 0) {
        pfn = (unsigned long)(uintptr_t)nv_nvkms_memory->pPhysicalAddress;
        pfn >>= PAGE_SHIFT;
#if defined(NV_LINUX)
        /*
         * On Linux, pgoff selects the page within the mapping, so advance
         * the base PFN by the faulting page's offset.
         *
         * FreeBSD doesn't set pgoff; there, pfn stays the base physical
         * page and the index is derived from the faulting virtual address
         * instead. This works because linux_cdev_pager_populate passes the
         * page index as vmf->virtual_address, which is then turned into a
         * physical page number.
         */
        pfn += page_offset;
#endif
    } else {
        BUG_ON(page_offset >= nv_nvkms_memory->pages_count);
        pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]);
    }

#if defined(NV_VMF_INSERT_PFN_PRESENT)
    ret = vmf_insert_pfn(vma, address, pfn);
#else
    ret = vm_insert_pfn(vma, address, pfn);
    switch (ret) {
        case 0:
        case -EBUSY:
            /*
             * EBUSY indicates that another thread already handled
             * the faulted range.
             */
            ret = VM_FAULT_NOPAGE;
            break;
        case -ENOMEM:
            ret = VM_FAULT_OOM;
            break;
        default:
            WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
            ret = VM_FAULT_SIGBUS;
            break;
    }
#endif /* defined(NV_VMF_INSERT_PFN_PRESENT) */
    return ret;
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
    return VM_FAULT_SIGBUS;
}

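/* Defined below, after the ioctl handlers; referenced by the ops table. */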
static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
    struct drm_device *dev,
    const struct nv_drm_gem_object *nv_gem_src);

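/*
 * Map the memory behind a GEM object for CPU access. This is a no-op
 * for system memory; video memory is mapped through NVKMS and then
 * ioremapped write-combined. An ioremap_wc() failure is deliberately
 * non-fatal: pWriteCombinedIORemapAddress is left NULL and callers that
 * need a CPU pointer must check for it.
 */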
static int __nv_drm_gem_nvkms_map(
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory)
{
    struct nv_drm_device *nv_dev = nv_nvkms_memory->base.nv_dev;
    struct NvKmsKapiMemory *pMemory = nv_nvkms_memory->base.pMemory;

    if (!nv_dev->hasVideoMemory) {
        return 0;
    }

    if (!nvKms->mapMemory(nv_dev->pDevice,
                          pMemory,
                          NVKMS_KAPI_MAPPING_TYPE_USER,
                          &nv_nvkms_memory->pPhysicalAddress)) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to map NvKmsKapiMemory 0x%p",
            pMemory);
        return -ENOMEM;
    }

    nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc(
            (uintptr_t)nv_nvkms_memory->pPhysicalAddress,
            nv_nvkms_memory->base.base.size);

    if (!nv_nvkms_memory->pWriteCombinedIORemapAddress) {
        NV_DRM_DEV_LOG_INFO(
            nv_dev,
            "Failed to ioremap_wc NvKmsKapiMemory 0x%p",
            pMemory);
    }

    nv_nvkms_memory->physically_mapped = true;

    return 0;
}

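/*
 * PRIME vmap callback: map the object on first use and return its
 * write-combined CPU address (NULL if ioremap_wc() failed).
 */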
static void *__nv_drm_gem_nvkms_prime_vmap(
    struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        to_nv_nvkms_memory(nv_gem);

    if (!nv_nvkms_memory->physically_mapped) {
        int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
        if (ret) {
            return ERR_PTR(ret);
        }
    }

    return nv_nvkms_memory->pWriteCombinedIORemapAddress;
}

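/*
 * create_mmap_offset callback: ensure the memory is physically mapped,
 * then allocate the fake mmap offset that userspace passes to mmap(2).
 */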
static int __nv_drm_gem_map_nvkms_memory_offset(
    struct nv_drm_device *nv_dev,
    struct nv_drm_gem_object *nv_gem,
    uint64_t *offset)
{
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        to_nv_nvkms_memory(nv_gem);

    if (!nv_nvkms_memory->physically_mapped) {
        int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
        if (ret) {
            return ret;
        }
    }

    return nv_drm_gem_create_mmap_offset(&nv_nvkms_memory->base, offset);
}

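/*
 * PRIME export: build an sg_table from the page list. Only allocations
 * that have a page list (sysmem) can be exported this way; vidmem
 * objects have no pages and fail with -ENOMEM.
 */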
static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
    struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_device *nv_dev = nv_gem->nv_dev;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
        to_nv_nvkms_memory(nv_gem);
    struct sg_table *sg_table;

    if (nv_nvkms_memory->pages_count == 0) {
        NV_DRM_DEV_LOG_ERR(
                nv_dev,
                "Cannot create sg_table for NvKmsKapiMemory 0x%p",
                nv_gem->pMemory);
        return ERR_PTR(-ENOMEM);
    }

    sg_table = nv_drm_prime_pages_to_sg(nv_dev->dev,
                                        nv_nvkms_memory->pages,
                                        nv_nvkms_memory->pages_count);

    return sg_table;
}

const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = {
    .free = __nv_drm_gem_nvkms_memory_free,
    .prime_dup = __nv_drm_gem_nvkms_prime_dup,
    .prime_vmap = __nv_drm_gem_nvkms_prime_vmap,
    .mmap = __nv_drm_gem_nvkms_mmap,
    .handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault,
    .create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset,
    .prime_get_sg_table = __nv_drm_gem_nvkms_memory_prime_get_sg_table,
};

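/*
 * Common initialization for NVKMS-backed GEM objects: validate that the
 * size is page-aligned, fetch the page list from NVKMS when one is
 * available, and initialize the embedded GEM object.
 */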
static int __nv_drm_nvkms_gem_obj_init(
    struct nv_drm_device *nv_dev,
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory,
    struct NvKmsKapiMemory *pMemory,
    uint64_t size)
{
    NvU64 *pages = NULL;
    NvU32 numPages = 0;

    if ((size % PAGE_SIZE) != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "NvKmsKapiMemory 0x%p size must be a multiple of the page size "
            "to create a GEM object",
            pMemory);
        return -EINVAL;
    }

    nv_nvkms_memory->pPhysicalAddress = NULL;
    nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL;
    nv_nvkms_memory->physically_mapped = false;

    if (!nvKms->getMemoryPages(nv_dev->pDevice,
                               pMemory,
                               &pages,
                               &numPages) &&
        !nv_dev->hasVideoMemory) {
        /* GetMemoryPages may fail for vidmem allocations,
         * but it should not fail for sysmem allocations. */
        NV_DRM_DEV_LOG_ERR(nv_dev,
            "Failed to get memory pages for NvKmsKapiMemory 0x%p",
            pMemory);
        return -ENOMEM;
    }
    nv_nvkms_memory->pages_count = numPages;
    nv_nvkms_memory->pages = (struct page **)pages;

    nv_drm_gem_object_init(nv_dev,
                           &nv_nvkms_memory->base,
                           &nv_gem_nvkms_memory_ops,
                           size,
                           pMemory);

    return 0;
}

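/*
 * Implementation of the DRM dumb-buffer create hook: compute a
 * pitch-aligned, page-aligned size, allocate scanout-capable memory
 * (vidmem when available, sysmem otherwise), and map it up front for
 * CPU access.
 */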
int nv_drm_dumb_create(
    struct drm_file *file_priv,
    struct drm_device *dev, struct drm_mode_create_dumb *args)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
    uint8_t compressible = 0;
    struct NvKmsKapiMemory *pMemory;
    int ret = 0;

    args->pitch = roundup(args->width * ((args->bpp + 7) >> 3),
                          nv_dev->pitchAlignment);

    args->size = args->height * args->pitch;

    /* Core DRM requires the GEM object size to be aligned to PAGE_SIZE */

    args->size = roundup(args->size, PAGE_SIZE);

    if ((nv_nvkms_memory =
            nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (nv_dev->hasVideoMemory) {
        pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice,
                                             NvKmsSurfaceMemoryLayoutPitch,
                                             NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
                                             args->size,
                                             &compressible);
    } else {
        pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice,
                                              NvKmsSurfaceMemoryLayoutPitch,
                                              NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
                                              args->size,
                                              &compressible);
    }

    if (pMemory == NULL) {
        ret = -ENOMEM;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to allocate NvKmsKapiMemory for dumb object of size %" NvU64_fmtu,
            args->size);
        goto nvkms_alloc_memory_failed;
    }

    ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, args->size);
    if (ret) {
        goto nvkms_gem_obj_init_failed;
    }

    /* Always map dumb buffer memory up front.  Clients are only expected
     * to use dumb buffers for software rendering, so they're not much use
     * without a CPU mapping.
     */
    ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
    if (ret) {
        nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
        goto fail;
    }

    return nv_drm_gem_handle_create_drop_reference(file_priv,
                                                   &nv_nvkms_memory->base,
                                                   &args->handle);

nvkms_gem_obj_init_failed:
    nvKms->freeMemory(nv_dev->pDevice, pMemory);

nvkms_alloc_memory_failed:
    nv_drm_free(nv_nvkms_memory);

fail:
    return ret;
}

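/*
 * Ioctl handler: import memory previously exported by NVKMS (described
 * by the nvkms_params blob) and wrap it in a new GEM object.
 */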
int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
                                         void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_import_nvkms_memory_params *p = data;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
    struct NvKmsKapiMemory *pMemory;
    int ret;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        ret = -EINVAL;
        goto failed;
    }

    if ((nv_nvkms_memory =
            nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
        ret = -ENOMEM;
        goto failed;
    }

    pMemory = nvKms->importMemory(nv_dev->pDevice,
                                  p->mem_size,
                                  p->nvkms_params_ptr,
                                  p->nvkms_params_size);

    if (pMemory == NULL) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to import NVKMS memory to GEM object");
        goto nvkms_import_memory_failed;
    }

    ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, p->mem_size);
    if (ret) {
        goto nvkms_gem_obj_init_failed;
    }

    return nv_drm_gem_handle_create_drop_reference(filep,
                                                   &nv_nvkms_memory->base,
                                                   &p->handle);
nvkms_gem_obj_init_failed:
    nvKms->freeMemory(nv_dev->pDevice, pMemory);

nvkms_import_memory_failed:
    nv_drm_free(nv_nvkms_memory);

failed:
    return ret;
}

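/*
 * Ioctl handler: export the NVKMS memory behind an existing GEM handle
 * so it can be imported elsewhere through NVKMS.
 */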
int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
                                         void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_export_nvkms_memory_params *p = data;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
    int ret = 0;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        ret = -EINVAL;
        goto done;
    }

    if (p->__pad != 0) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
        goto done;
    }

    if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
                    dev,
                    filep,
                    p->handle)) == NULL) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup NVKMS gem object for export: 0x%08x",
            p->handle);
        goto done;
    }

    if (!nvKms->exportMemory(nv_dev->pDevice,
                             nv_nvkms_memory->base.pMemory,
                             p->nvkms_params_ptr,
                             p->nvkms_params_size)) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to export memory from NVKMS GEM object: 0x%08x", p->handle);
        goto done;
    }

done:
    if (nv_nvkms_memory != NULL) {
        nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
    }

    return ret;
}

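/*
 * Ioctl handler: allocate NVKMS memory with the requested layout
 * (block-linear or pitch) and scanout capability, and return a GEM
 * handle for it.
 */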
int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
                                        void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_alloc_nvkms_memory_params *p = data;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
    struct NvKmsKapiMemory *pMemory;
    enum NvKmsSurfaceMemoryLayout layout;
    enum NvKmsKapiAllocationType type;
    int ret = 0;

    if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
        ret = -EINVAL;
        goto failed;
    }

    if ((p->__pad0 != 0) || (p->__pad1 != 0)) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field");
        goto failed;
    }

    if ((nv_nvkms_memory =
            nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
        ret = -ENOMEM;
        goto failed;
    }

    layout = p->block_linear ?
        NvKmsSurfaceMemoryLayoutBlockLinear : NvKmsSurfaceMemoryLayoutPitch;
    type = (p->flags & NV_GEM_ALLOC_NO_SCANOUT) ?
        NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN : NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT;

    if (nv_dev->hasVideoMemory) {
        pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice,
                                             layout,
                                             type,
                                             p->memory_size,
                                             &p->compressible);
    } else {
        pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice,
                                              layout,
                                              type,
                                              p->memory_size,
                                              &p->compressible);
    }

    if (pMemory == NULL) {
        ret = -EINVAL;
        NV_DRM_DEV_LOG_ERR(nv_dev,
                           "Failed to allocate NVKMS memory for GEM object");
        goto nvkms_alloc_memory_failed;
    }

    ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory,
                                      p->memory_size);
    if (ret) {
        goto nvkms_gem_obj_init_failed;
    }

    return nv_drm_gem_handle_create_drop_reference(filep,
                                                   &nv_nvkms_memory->base,
                                                   &p->handle);

nvkms_gem_obj_init_failed:
    nvKms->freeMemory(nv_dev->pDevice, pMemory);

nvkms_alloc_memory_failed:
    nv_drm_free(nv_nvkms_memory);

failed:
    return ret;
}

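/*
 * PRIME dup callback: duplicate the source object's NvKmsKapiMemory
 * onto this device and wrap it in a new GEM object. Returns NULL on
 * failure.
 */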
static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
    struct drm_device *dev,
    const struct nv_drm_gem_object *nv_gem_src)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    const struct nv_drm_device *nv_dev_src;
    const struct nv_drm_gem_nvkms_memory *nv_nvkms_memory_src;
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
    struct NvKmsKapiMemory *pMemory;

    BUG_ON(nv_gem_src == NULL || nv_gem_src->ops != &nv_gem_nvkms_memory_ops);

    nv_dev_src = to_nv_device(nv_gem_src->base.dev);
    nv_nvkms_memory_src = to_nv_nvkms_memory_const(nv_gem_src);

    if ((nv_nvkms_memory =
            nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
        return NULL;
    }

    pMemory = nvKms->dupMemory(nv_dev->pDevice,
                               nv_dev_src->pDevice, nv_gem_src->pMemory);
    if (pMemory == NULL) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to dup NVKMS memory for GEM object");
        goto nvkms_dup_memory_failed;
    }

    if (__nv_drm_nvkms_gem_obj_init(nv_dev,
                                    nv_nvkms_memory,
                                    pMemory,
                                    nv_gem_src->base.size)) {
        goto nvkms_gem_obj_init_failed;
    }

    return &nv_nvkms_memory->base.base;

nvkms_gem_obj_init_failed:
    nvKms->freeMemory(nv_dev->pDevice, pMemory);

nvkms_dup_memory_failed:
    nv_drm_free(nv_nvkms_memory);

    return NULL;
}

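/*
 * DRM dumb_map_offset hook: look up the GEM object for the given handle
 * and return the fake mmap offset for it, mapping the memory first if
 * needed.
 */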
int nv_drm_dumb_map_offset(struct drm_file *file,
                           struct drm_device *dev, uint32_t handle,
                           uint64_t *offset)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
    int ret = -EINVAL;

    if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
                    dev,
                    file,
                    handle)) == NULL) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup gem object for mapping: 0x%08x",
            handle);
        return ret;
    }

    ret = __nv_drm_gem_map_nvkms_memory_offset(nv_dev,
                                               &nv_nvkms_memory->base, offset);

    nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);

    return ret;
}

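/*
 * Only built when conftest detects that struct drm_driver still has a
 * .dumb_destroy hook; deleting the GEM handle is all that is required.
 */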
#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
int nv_drm_dumb_destroy(struct drm_file *file,
                        struct drm_device *dev,
                        uint32_t handle)
{
    return drm_gem_handle_delete(file, handle);
}
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */