/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvidia-drm-conftest.h"

#if defined(NV_DRM_AVAILABLE)

#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif

#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-drm-helper.h"
#include "nvidia-drm-ioctl.h"

#include "linux/dma-buf.h"
#include "linux/mm.h"
#include "nv-mm.h"

#if defined(NV_BSD)
#include <vm/vm_pageout.h>
#endif

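/*
 * GEM "free" callback for user-memory objects: releases the pinned user
 * pages that back the object and frees the wrapper structure.
 */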
static inline
void __nv_drm_gem_user_memory_free(struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);

    nv_drm_unlock_user_pages(nv_user_memory->pages_count,
                             nv_user_memory->pages);

    nv_drm_free(nv_user_memory);
}

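/*
 * PRIME export callback: builds a scatter-gather table over the pinned user
 * pages so the object can be shared with other drivers via dma-buf.
 */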
static struct sg_table *__nv_drm_gem_user_memory_prime_get_sg_table(
    struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
    struct drm_gem_object *gem = &nv_gem->base;

    return nv_drm_prime_pages_to_sg(gem->dev,
                                    nv_user_memory->pages,
                                    nv_user_memory->pages_count);
}

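/*
 * PRIME vmap callback: maps the pinned user pages into a contiguous kernel
 * virtual address range.
 */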
static void *__nv_drm_gem_user_memory_prime_vmap(
    struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);

    return nv_drm_vmap(nv_user_memory->pages,
                       nv_user_memory->pages_count);
}

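/*
 * PRIME vunmap callback: tears down the kernel mapping created by
 * __nv_drm_gem_user_memory_prime_vmap().
 */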
static void __nv_drm_gem_user_memory_prime_vunmap(
    struct nv_drm_gem_object *gem,
    void *address)
{
    nv_drm_vunmap(address);
}

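/*
 * mmap callback: maps the object through drm_gem_mmap_obj() and marks the
 * VMA as VM_MIXEDMAP (clearing VM_PFNMAP and VM_IO) so that individual
 * struct pages can be inserted from the fault handler.
 */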
static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem,
                                         struct vm_area_struct *vma)
{
    int ret = drm_gem_mmap_obj(&nv_gem->base,
                drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);

    if (ret < 0) {
        return ret;
    }

    /*
     * Enforce that user-memory GEM mappings are MAP_SHARED, to prevent COW
     * with MAP_PRIVATE and VM_MIXEDMAP
     */
    if (!(vma->vm_flags & VM_SHARED)) {
        return -EINVAL;
    }

    nv_vm_flags_clear(vma, VM_PFNMAP);
    nv_vm_flags_clear(vma, VM_IO);
    nv_vm_flags_set(vma, VM_MIXEDMAP);

    return 0;
}

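/*
 * Fault handler: finds the user page backing the faulting offset within the
 * GEM object and inserts it into the VMA.  On Linux vm_insert_page() is used
 * (valid for VM_MIXEDMAP mappings); other platforms insert the PFN directly.
 */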
static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
    struct nv_drm_gem_object *nv_gem,
    struct vm_area_struct *vma,
    struct vm_fault *vmf)
{
    struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
    unsigned long address = nv_page_fault_va(vmf);
    struct drm_gem_object *gem = vma->vm_private_data;
    unsigned long page_offset;
    vm_fault_t ret;

    page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);

    BUG_ON(page_offset >= nv_user_memory->pages_count);

#if !defined(NV_LINUX)
    ret = vmf_insert_pfn(vma, address, page_to_pfn(nv_user_memory->pages[page_offset]));
#else /* !defined(NV_LINUX) */
    ret = vm_insert_page(vma, address, nv_user_memory->pages[page_offset]);
    switch (ret) {
        case 0:
        case -EBUSY:
            /*
             * EBUSY indicates that another thread already handled
             * the faulted range.
             */
            ret = VM_FAULT_NOPAGE;
            break;
        case -ENOMEM:
            ret = VM_FAULT_OOM;
            break;
        default:
            WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
            ret = VM_FAULT_SIGBUS;
            break;
    }
#endif /* !defined(NV_LINUX) */

    return ret;
}

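/*
 * Reserves an offset in the DRM device's mmap space for this object so that
 * userspace can map it through the device file; nv_dev is unused here.
 */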
static int __nv_drm_gem_user_create_mmap_offset(
    struct nv_drm_device *nv_dev,
    struct nv_drm_gem_object *nv_gem,
    uint64_t *offset)
{
    (void)nv_dev;
    return nv_drm_gem_create_mmap_offset(nv_gem, offset);
}

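/* GEM object callbacks for objects backed by imported userspace memory. */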
const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops = {
    .free = __nv_drm_gem_user_memory_free,
    .prime_get_sg_table = __nv_drm_gem_user_memory_prime_get_sg_table,
    .prime_vmap = __nv_drm_gem_user_memory_prime_vmap,
    .prime_vunmap = __nv_drm_gem_user_memory_prime_vunmap,
    .mmap = __nv_drm_gem_user_memory_mmap,
    .handle_vma_fault = __nv_drm_gem_user_memory_handle_vma_fault,
    .create_mmap_offset = __nv_drm_gem_user_create_mmap_offset,
};

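/*
 * Ioctl handler that imports userspace memory as a GEM object: the requested
 * size must be a whole number of pages; the pages are pinned and wrapped in
 * a GEM object whose handle is returned to the caller.
 *
 * Rough userspace usage, as a sketch only -- the request macro below is
 * assumed to be the one declared for this handler in nvidia-drm-ioctl.h:
 *
 *     struct drm_nvidia_gem_import_userspace_memory_params p = { };
 *     p.address = (NvU64)(uintptr_t)buf;  // page-aligned user allocation
 *     p.size    = buf_size;               // multiple of PAGE_SIZE
 *     if (ioctl(drm_fd, DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY, &p) == 0) {
 *         // p.handle now names the new GEM object
 *     }
 */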
int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
                                             void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);

    struct drm_nvidia_gem_import_userspace_memory_params *params = data;
    struct nv_drm_gem_user_memory *nv_user_memory;

    struct page **pages = NULL;
    unsigned long pages_count = 0;

    int ret = 0;

    if ((params->size % PAGE_SIZE) != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Size of userspace memory 0x%" NvU64_fmtx " must be a multiple of "
            "page size to create a GEM object",
            params->address);
        return -EINVAL;
    }

    pages_count = params->size / PAGE_SIZE;

    ret = nv_drm_lock_user_pages(params->address, pages_count, &pages);

    if (ret != 0) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lock user pages for address 0x%" NvU64_fmtx ": %d",
            params->address, ret);
        return ret;
    }

    if ((nv_user_memory =
            nv_drm_calloc(1, sizeof(*nv_user_memory))) == NULL) {
        ret = -ENOMEM;
        goto failed;
    }

    nv_user_memory->pages = pages;
    nv_user_memory->pages_count = pages_count;

    nv_drm_gem_object_init(nv_dev,
                           &nv_user_memory->base,
                           &__nv_gem_user_memory_ops,
                           params->size,
                           NULL /* pMemory */);

    return nv_drm_gem_handle_create_drop_reference(filep,
                                                   &nv_user_memory->base,
                                                   &params->handle);

failed:
    nv_drm_unlock_user_pages(pages_count, pages);

    return ret;
}

#endif