/*******************************************************************************
    Copyright (c) 2015-2022 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

        The above copyright notice and this permission notice shall be
        included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/

#include "uvm_api.h"
#include "uvm_global.h"
#include "uvm_gpu_replayable_faults.h"
#include "uvm_tools_init.h"
#include "uvm_lock.h"
#include "uvm_test.h"
#include "uvm_va_space.h"
#include "uvm_va_range.h"
#include "uvm_va_block.h"
#include "uvm_tools.h"
#include "uvm_common.h"
#include "uvm_linux_ioctl.h"
#include "uvm_hmm.h"
#include "uvm_mem.h"
#include "uvm_kvmalloc.h"

#define NVIDIA_UVM_DEVICE_NAME          "nvidia-uvm"

static dev_t g_uvm_base_dev;
static struct cdev g_uvm_cdev;
static const struct file_operations uvm_fops;

bool uvm_file_is_nvidia_uvm(struct file *filp)
{
    return (filp != NULL) && (filp->f_op == &uvm_fops);
}

uvm_fd_type_t uvm_fd_type(struct file *filp, void **ptr_val)
{
    unsigned long uptr;
    uvm_fd_type_t type;
    void *ptr;

    UVM_ASSERT(uvm_file_is_nvidia_uvm(filp));

    uptr = atomic_long_read_acquire((atomic_long_t *) (&filp->private_data));
    type = (uvm_fd_type_t)(uptr & UVM_FD_TYPE_MASK);
    ptr = (void *)(uptr & ~UVM_FD_TYPE_MASK);
    BUILD_BUG_ON(UVM_FD_COUNT > UVM_FD_TYPE_MASK + 1);

    switch (type) {
        case UVM_FD_UNINITIALIZED:
        case UVM_FD_INITIALIZING:
            UVM_ASSERT(!ptr);
            break;

        case UVM_FD_VA_SPACE:
            UVM_ASSERT(ptr);
            BUILD_BUG_ON(__alignof__(uvm_va_space_t) < (1UL << UVM_FD_TYPE_BITS));
            break;

        default:
            UVM_ASSERT(0);
    }

    if (ptr_val)
        *ptr_val = ptr;

    return type;
}
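
// Illustrative sketch (editor's note, not driver code): filp->private_data
// multiplexes a type tag and a pointer. The low UVM_FD_TYPE_BITS bits hold the
// uvm_fd_type_t value and the remaining bits hold the va_space pointer, which
// is why uvm_va_space_t must be aligned to at least (1 << UVM_FD_TYPE_BITS):
//
//     encoded = (long)va_space | UVM_FD_VA_SPACE;        // see uvm_api_initialize()
//     type    = (uvm_fd_type_t)(encoded & UVM_FD_TYPE_MASK);
//     ptr     = (void *)(encoded & ~UVM_FD_TYPE_MASK);
//
// The encoder publishes the value with atomic_long_set_release() so that the
// acquire-read above always observes a fully constructed va_space.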

// Called when opening /dev/nvidia-uvm. This code doesn't take any UVM locks, so
// there's no need to acquire g_uvm_global.pm.lock, but if that changes the PM
// lock will need to be taken.
static int uvm_open(struct inode *inode, struct file *filp)
{
    struct address_space *mapping;
    NV_STATUS status = uvm_global_get_status();

    if (status != NV_OK)
        return -nv_status_to_errno(status);

    mapping = uvm_kvmalloc(sizeof(*mapping));
    if (!mapping)
        return -ENOMEM;

    // By default all struct files on the same inode share the same
    // address_space structure (the inode's) across all processes. This means
    // unmap_mapping_range would unmap virtual mappings across all processes on
    // that inode.
    //
    // Since the UVM driver uses the mapping offset as the VA of the file's
    // process, we need to isolate the mappings to each process.
    address_space_init_once(mapping);
    mapping->host = inode;

    // Some paths in the kernel, for example force_page_cache_readahead which
    // can be invoked from user-space via madvise MADV_WILLNEED and fadvise
    // POSIX_FADV_WILLNEED, check the function pointers within
    // file->f_mapping->a_ops, and they assume that a_ops itself is always
    // valid. Handle that by using the inode's a_ops pointer, which is what
    // f_mapping->a_ops would point to anyway if we weren't re-assigning
    // f_mapping.
    mapping->a_ops = inode->i_mapping->a_ops;

#if defined(NV_ADDRESS_SPACE_HAS_BACKING_DEV_INFO)
    mapping->backing_dev_info = inode->i_mapping->backing_dev_info;
#endif

    filp->private_data = NULL;
    filp->f_mapping = mapping;

    return NV_OK;
}
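
// Illustrative sketch (editor's note; a sketch under the offset == VA rule
// enforced in uvm_mmap(), not code taken from the driver): giving each open
// file its own address_space means revoking the CPU mappings behind a single
// UVM VA range only affects the owning process:
//
//     unmap_mapping_range(filp->f_mapping,
//                         range_start,                  // byte offset == VA
//                         range_end - range_start + 1,  // length in bytes
//                         1);                           // even_cows
//
// If every file shared the inode's i_mapping instead, the same call would tear
// down mappings in every process that has /dev/nvidia-uvm open.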

static int uvm_open_entry(struct inode *inode, struct file *filp)
{
    UVM_ENTRY_RET(uvm_open(inode, filp));
}

static void uvm_release_deferred(void *data)
{
    uvm_va_space_t *va_space = data;

    // Since this function is only scheduled to run when uvm_release() fails
    // to trylock-acquire the pm.lock, the following acquisition attempt
    // is expected to block this thread, and cause it to remain blocked until
    // uvm_resume() releases the lock.  As a result, the deferred release
    // kthread queue may stall for long periods of time.
    uvm_down_read(&g_uvm_global.pm.lock);

    uvm_va_space_destroy(va_space);

    uvm_up_read(&g_uvm_global.pm.lock);
}

static int uvm_release(struct inode *inode, struct file *filp)
{
    uvm_va_space_t *va_space;
    uvm_fd_type_t fd_type;
    int ret;

    fd_type = uvm_fd_type(filp, (void **)&va_space);
    UVM_ASSERT(fd_type != UVM_FD_INITIALIZING);
    if (fd_type == UVM_FD_UNINITIALIZED) {
        uvm_kvfree(filp->f_mapping);
        return 0;
    }

    UVM_ASSERT(fd_type == UVM_FD_VA_SPACE);
    filp->private_data = NULL;
    filp->f_mapping = NULL;

    // Because the kernel discards the status code returned from this release
    // callback, early exit in case of a pm.lock acquisition failure is not
    // an option.  Instead, the teardown work normally performed synchronously
    // needs to be scheduled to run after uvm_resume() releases the lock.
    if (uvm_down_read_trylock(&g_uvm_global.pm.lock)) {
        uvm_va_space_destroy(va_space);
        uvm_up_read(&g_uvm_global.pm.lock);
    }
    else {
        // Remove references to this inode from the address_space.  This isn't
        // strictly necessary, as any CPU mappings of this file have already
        // been destroyed, and va_space->mapping won't be used again. Still,
        // the va_space survives the inode if its destruction is deferred, in
        // which case the references are rendered stale.
        address_space_init_once(va_space->mapping);

        nv_kthread_q_item_init(&va_space->deferred_release_q_item, uvm_release_deferred, va_space);
        ret = nv_kthread_q_schedule_q_item(&g_uvm_global.deferred_release_q, &va_space->deferred_release_q_item);
        UVM_ASSERT(ret != 0);
    }

    return 0;
}
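
// Summary of the release paths above (editor's note):
//
//     uvm_release()
//       - pm.lock read-trylock succeeds: uvm_va_space_destroy() runs inline.
//       - trylock fails (suspend in progress): a work item is queued on
//         g_uvm_global.deferred_release_q and uvm_release_deferred() runs once
//         uvm_resume() releases pm.lock.
//
// Either way the kernel sees a successful release; the deferral exists only
// because this callback's return value is ignored.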

static int uvm_release_entry(struct inode *inode, struct file *filp)
{
    UVM_ENTRY_RET(uvm_release(inode, filp));
}

static void uvm_destroy_vma_managed(struct vm_area_struct *vma, bool make_zombie)
{
    uvm_va_range_t *va_range, *va_range_next;
    NvU64 size = 0;

    uvm_assert_rwsem_locked_write(&uvm_va_space_get(vma->vm_file)->lock);
    uvm_for_each_va_range_in_vma_safe(va_range, va_range_next, vma) {
        // On exit_mmap (process teardown), current->mm is cleared so
        // uvm_va_range_vma_current would return NULL.
        UVM_ASSERT(uvm_va_range_vma(va_range) == vma);
        UVM_ASSERT(va_range->node.start >= vma->vm_start);
        UVM_ASSERT(va_range->node.end   <  vma->vm_end);
        size += uvm_va_range_size(va_range);
        if (make_zombie)
            uvm_va_range_zombify(va_range);
        else
            uvm_va_range_destroy(va_range, NULL);
    }

    if (vma->vm_private_data) {
        uvm_vma_wrapper_destroy(vma->vm_private_data);
        vma->vm_private_data = NULL;
    }
    UVM_ASSERT(size == vma->vm_end - vma->vm_start);
}

static void uvm_destroy_vma_semaphore_pool(struct vm_area_struct *vma)
{
    uvm_va_space_t *va_space;
    uvm_va_range_t *va_range;

    va_space = uvm_va_space_get(vma->vm_file);
    uvm_assert_rwsem_locked(&va_space->lock);
    va_range = uvm_va_range_find(va_space, vma->vm_start);
    UVM_ASSERT(va_range &&
               va_range->node.start   == vma->vm_start &&
               va_range->node.end + 1 == vma->vm_end &&
               va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL);

    uvm_mem_unmap_cpu_user(va_range->semaphore_pool.mem);
}

// If a fault handler is not set, paths like handle_pte_fault in older kernels
// assume the memory is anonymous. That would make debugging this failure harder
// so we force it to fail instead.
static vm_fault_t uvm_vm_fault_sigbus(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    UVM_DBG_PRINT_RL("Fault to address 0x%lx in disabled vma\n", nv_page_fault_va(vmf));
    return VM_FAULT_SIGBUS;
}

static vm_fault_t uvm_vm_fault_sigbus_entry(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    UVM_ENTRY_RET(uvm_vm_fault_sigbus(vma, vmf));
}

static vm_fault_t uvm_vm_fault_sigbus_wrapper(struct vm_fault *vmf)
{
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    return uvm_vm_fault_sigbus(vmf->vma, vmf);
#else
    return uvm_vm_fault_sigbus(NULL, vmf);
#endif
}
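
// Background (editor's note, an assumption about what the conftest macro
// checks): newer kernels dropped the vma argument from
// vm_operations_struct::fault and pass it through vmf->vma instead, which is
// what NV_VM_OPS_FAULT_REMOVED_VMA_ARG selects between:
//
//     vm_fault_t (*fault)(struct vm_fault *vmf);                      // newer kernels
//     int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); // older kernels
//
// The *_wrapper functions adapt the newer single-argument prototype back onto
// the two-argument implementations in this file.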

static vm_fault_t uvm_vm_fault_sigbus_wrapper_entry(struct vm_fault *vmf)
{
    UVM_ENTRY_RET(uvm_vm_fault_sigbus_wrapper(vmf));
}

static struct vm_operations_struct uvm_vm_ops_disabled =
{
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    .fault = uvm_vm_fault_sigbus_wrapper_entry
#else
    .fault = uvm_vm_fault_sigbus_entry
#endif
};

static void uvm_disable_vma(struct vm_area_struct *vma)
{
    // In the case of fork, the kernel has already copied the old PTEs over to
    // the child process, so an access in the child might succeed instead of
    // causing a fault. To force a fault we'll unmap it directly here.
    //
    // Note that since the unmap works on file offset, not virtual address, this
    // unmaps both the old and new vmas.
    //
    // In the case of a move (mremap), the kernel will copy the PTEs over later,
    // so it doesn't matter if we unmap here. However, the new vma's open will
    // immediately be followed by a close on the old vma. We call
    // unmap_mapping_range for the close, which also unmaps the new vma because
    // they have the same file offset.
    unmap_mapping_range(vma->vm_file->f_mapping,
                        vma->vm_pgoff << PAGE_SHIFT,
                        vma->vm_end - vma->vm_start,
                        1);

    vma->vm_ops = &uvm_vm_ops_disabled;

    if (vma->vm_private_data) {
        uvm_vma_wrapper_destroy(vma->vm_private_data);
        vma->vm_private_data = NULL;
    }
}

// We can't return an error from uvm_vm_open so on failed splits we'll disable
// *both* vmas. This isn't great behavior for the user, but we don't have many
// options. We could leave the old VA range in place but that breaks the model
// of vmas always completely covering VA ranges. We'd have to be very careful
// handling later splits and closes of both that partially-covered VA range,
// and of the vmas which might or might not cover it any more.
//
// A failure likely means we're in OOM territory, so this should not be common
// by any means, and the process might die anyway.
static void uvm_vm_open_failure(struct vm_area_struct *original,
                                struct vm_area_struct *new)
{
    uvm_va_space_t *va_space = uvm_va_space_get(new->vm_file);
    static const bool make_zombie = false;

    UVM_ASSERT(va_space == uvm_va_space_get(original->vm_file));
    uvm_assert_rwsem_locked_write(&va_space->lock);

    uvm_destroy_vma_managed(original, make_zombie);
    uvm_disable_vma(original);
    uvm_disable_vma(new);
}

// vm_ops->open cases:
//
// 1) Parent vma is dup'd (fork)
//    This is undefined behavior in the UVM Programming Model. For convenience
//    the parent will continue operating properly, but the child is not
//    guaranteed access to the range.
//
// 2) Original vma is split (munmap, mprotect, mremap, mbind, etc)
//    The UVM Programming Model supports mbind always and supports mprotect if
//    HMM is present. Supporting either of those means all such splitting cases
//    must be handled. This involves splitting the va_range covering the split
//    location. Note that the kernel will never merge us back on two counts: we
//    set VM_MIXEDMAP and we have a ->close callback.
//
// 3) Original vma is moved (mremap)
//    This is undefined behavior in the UVM Programming Model. We'll get an open
//    on the new vma in which we disable operations on the new vma, then a close
//    on the old vma.
//
// Note that since we set VM_DONTEXPAND on the vma we're guaranteed that the vma
// will never increase in size, only shrink/split.
static void uvm_vm_open_managed(struct vm_area_struct *vma)
{
    uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);
    uvm_va_range_t *va_range;
    struct vm_area_struct *original;
    NV_STATUS status;
    NvU64 new_end;

    // This is slightly ugly. We need to know the parent vma of this new one,
    // but we can't use the range tree to look up the original because that
    // doesn't handle a vma move operation.
    //
    // However, all of the old vma's fields have been copied into the new vma,
    // and open of the new vma is always called before close of the old (in
    // cases where close will be called immediately afterwards, like move).
    // vma->vm_private_data will thus still point to the original vma that we
    // set in mmap or open.
    //
    // Things to watch out for here:
    // - For splits, the old vma hasn't been adjusted yet so its vm_start and
    //   vm_end region will overlap with this vma's start and end.
    //
    // - For splits and moves, the new vma has not yet been inserted into the
    //   mm's list so vma->vm_prev and vma->vm_next cannot be used, nor will
    //   the new vma show up in find_vma and friends.
    original = ((uvm_vma_wrapper_t*)vma->vm_private_data)->vma;
    vma->vm_private_data = NULL;
    // On fork or move we want to simply disable the new vma
    if (vma->vm_mm != original->vm_mm ||
        (vma->vm_start != original->vm_start && vma->vm_end != original->vm_end)) {
        uvm_disable_vma(vma);
        return;
    }

    // At this point we are guaranteed that the mmap_lock is held in write
    // mode.
    uvm_record_lock_mmap_lock_write(current->mm);

    // Split vmas should always fall entirely within the old one, and be on one
    // side.
    UVM_ASSERT(vma->vm_start >= original->vm_start && vma->vm_end <= original->vm_end);
    UVM_ASSERT(vma->vm_start == original->vm_start || vma->vm_end == original->vm_end);

    // The vma is splitting, so create a new range under this vma if necessary.
    // The kernel handles splits in the middle of the vma by doing two separate
    // splits so we just have to handle one vma splitting in two here.
    if (vma->vm_start == original->vm_start)
        new_end = vma->vm_end - 1; // Left split (new_end is inclusive)
    else
        new_end = vma->vm_start - 1; // Right split (new_end is inclusive)

    uvm_va_space_down_write(va_space);

    vma->vm_private_data = uvm_vma_wrapper_alloc(vma);
    if (!vma->vm_private_data) {
        uvm_vm_open_failure(original, vma);
        goto out;
    }

    // There can be multiple va_ranges under the vma already. Check if one spans
    // the new split boundary. If so, split it.
    va_range = uvm_va_range_find(va_space, new_end);
    UVM_ASSERT(va_range);
    UVM_ASSERT(uvm_va_range_vma_current(va_range) == original);
    if (va_range->node.end != new_end) {
        status = uvm_va_range_split(va_range, new_end, NULL);
        if (status != NV_OK) {
            UVM_DBG_PRINT("Failed to split VA range, destroying both: %s. "
                          "original vma [0x%lx, 0x%lx) new vma [0x%lx, 0x%lx)\n",
                          nvstatusToString(status),
                          original->vm_start, original->vm_end,
                          vma->vm_start, vma->vm_end);
            uvm_vm_open_failure(original, vma);
            goto out;
        }
    }

    // Point va_ranges to the new vma
    uvm_for_each_va_range_in_vma(va_range, vma) {
        UVM_ASSERT(uvm_va_range_vma_current(va_range) == original);
        va_range->managed.vma_wrapper = vma->vm_private_data;
    }

out:
    uvm_va_space_up_write(va_space);
    uvm_record_unlock_mmap_lock_write(current->mm);
}
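
// Worked example for the split handling above (editor's note, hypothetical
// addresses): if the original vma covers [0x7f0000000000, 0x7f0000200000) and
// is split at 0x7f0000100000, this callback runs for the newly created vma:
//
//     new vma is the lower half: vma->vm_start == original->vm_start, so
//         new_end = vma->vm_end - 1   = 0x7f00000fffff
//     new vma is the upper half: vma->vm_end == original->vm_end, so
//         new_end = vma->vm_start - 1 = 0x7f00000fffff
//
// In both cases new_end is the last byte below the split address, and the
// va_range containing it is split there so that va_ranges keep lining up
// exactly with vma boundaries.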

static void uvm_vm_open_managed_entry(struct vm_area_struct *vma)
{
    UVM_ENTRY_VOID(uvm_vm_open_managed(vma));
}

static void uvm_vm_close_managed(struct vm_area_struct *vma)
{
    uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);
    uvm_processor_id_t gpu_id;
    bool make_zombie = false;

    if (current->mm != NULL)
        uvm_record_lock_mmap_lock_write(current->mm);

    // current->mm will be NULL on process teardown, in which case we have
    // special handling.
    if (current->mm == NULL) {
        make_zombie = (va_space->initialization_flags & UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE);
        if (!make_zombie) {
            // If we're not in multi-process mode, then we want to stop all user
            // channels before unmapping the managed allocations to avoid
            // spurious MMU faults in the system log. If we have a va_space_mm
            // then this must've already happened as part of
            // uvm_va_space_mm_shutdown. Otherwise we need to handle it here.
            if (uvm_va_space_mm_enabled(va_space) && current->mm == va_space->va_space_mm.mm) {
                UVM_ASSERT(atomic_read(&va_space->user_channels_stopped));
            }
            else {
                // Stopping channels involves making RM calls, so we have to do
                // that with the VA space lock in read mode.
                uvm_va_space_down_read_rm(va_space);
                if (!atomic_read(&va_space->user_channels_stopped))
                    uvm_va_space_stop_all_user_channels(va_space);
                uvm_va_space_up_read_rm(va_space);
            }
        }
    }

    // See uvm_mmap for why we need this in addition to mmap_lock
    uvm_va_space_down_write(va_space);

    uvm_destroy_vma_managed(vma, make_zombie);

    // Notify GPU address spaces that the fault buffer needs to be flushed to
    // avoid finding stale entries that can be attributed to new VA ranges
    // reallocated at the same address.
    for_each_gpu_id_in_mask(gpu_id, &va_space->registered_gpu_va_spaces) {
        uvm_processor_mask_set_atomic(&va_space->needs_fault_buffer_flush, gpu_id);
    }
    uvm_va_space_up_write(va_space);

    if (current->mm != NULL)
        uvm_record_unlock_mmap_lock_write(current->mm);
}

static void uvm_vm_close_managed_entry(struct vm_area_struct *vma)
{
    UVM_ENTRY_VOID(uvm_vm_close_managed(vma));
}

static vm_fault_t uvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);

    return uvm_va_space_cpu_fault_managed(va_space, vma, vmf);
}

static vm_fault_t uvm_vm_fault_entry(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    UVM_ENTRY_RET(uvm_vm_fault(vma, vmf));
}

static vm_fault_t uvm_vm_fault_wrapper(struct vm_fault *vmf)
{
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    return uvm_vm_fault(vmf->vma, vmf);
#else
    return uvm_vm_fault(NULL, vmf);
#endif
}

static vm_fault_t uvm_vm_fault_wrapper_entry(struct vm_fault *vmf)
{
    UVM_ENTRY_RET(uvm_vm_fault_wrapper(vmf));
}

static struct vm_operations_struct uvm_vm_ops_managed =
{
    .open         = uvm_vm_open_managed_entry,
    .close        = uvm_vm_close_managed_entry,

#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    .fault        = uvm_vm_fault_wrapper_entry,
    .page_mkwrite = uvm_vm_fault_wrapper_entry,
#else
    .fault        = uvm_vm_fault_entry,
    .page_mkwrite = uvm_vm_fault_entry,
#endif
};

// vm operations on semaphore pool allocations only control CPU mappings.
// Unmapping GPUs, freeing the allocation, and destroying the va_range are
// handled by UVM_FREE.
static void uvm_vm_open_semaphore_pool(struct vm_area_struct *vma)
{
    struct vm_area_struct *origin_vma = (struct vm_area_struct *)vma->vm_private_data;
    uvm_va_space_t *va_space = uvm_va_space_get(origin_vma->vm_file);
    uvm_va_range_t *va_range;
    bool is_fork = (vma->vm_mm != origin_vma->vm_mm);
    NV_STATUS status;

    uvm_record_lock_mmap_lock_write(current->mm);

    uvm_va_space_down_write(va_space);

    va_range = uvm_va_range_find(va_space, origin_vma->vm_start);
    UVM_ASSERT(va_range);
    UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL &&
                   va_range->node.start == origin_vma->vm_start &&
                   va_range->node.end + 1 == origin_vma->vm_end,
                   "origin vma [0x%llx, 0x%llx); va_range [0x%llx, 0x%llx) type %d\n",
                   (NvU64)origin_vma->vm_start, (NvU64)origin_vma->vm_end, va_range->node.start,
                   va_range->node.end + 1, va_range->type);

    // Semaphore pool vmas do not have vma wrappers, but some functions will
    // assume vm_private_data is a wrapper.
    vma->vm_private_data = NULL;

    if (is_fork) {
        // If we forked, leave the parent vma alone.
        uvm_disable_vma(vma);

        // uvm_disable_vma unmaps in the parent as well; clear the uvm_mem CPU
        // user mapping metadata and then remap.
        uvm_mem_unmap_cpu_user(va_range->semaphore_pool.mem);

        status = uvm_mem_map_cpu_user(va_range->semaphore_pool.mem, va_range->va_space, origin_vma);
        if (status != NV_OK) {
            UVM_DBG_PRINT("Failed to remap semaphore pool to CPU for parent after fork; status = %d (%s)",
                          status, nvstatusToString(status));
            origin_vma->vm_ops = &uvm_vm_ops_disabled;
        }
    }
    else {
        origin_vma->vm_private_data = NULL;
        origin_vma->vm_ops = &uvm_vm_ops_disabled;
        vma->vm_ops = &uvm_vm_ops_disabled;
        uvm_mem_unmap_cpu_user(va_range->semaphore_pool.mem);
    }

    uvm_va_space_up_write(va_space);

    uvm_record_unlock_mmap_lock_write(current->mm);
}

static void uvm_vm_open_semaphore_pool_entry(struct vm_area_struct *vma)
{
    UVM_ENTRY_VOID(uvm_vm_open_semaphore_pool(vma));
}

// vm operations on semaphore pool allocations only control CPU mappings.
// Unmapping GPUs, freeing the allocation, and destroying the va_range are
// handled by UVM_FREE.
static void uvm_vm_close_semaphore_pool(struct vm_area_struct *vma)
{
    uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);

    if (current->mm != NULL)
        uvm_record_lock_mmap_lock_write(current->mm);

    uvm_va_space_down_read(va_space);

    uvm_destroy_vma_semaphore_pool(vma);

    uvm_va_space_up_read(va_space);

    if (current->mm != NULL)
        uvm_record_unlock_mmap_lock_write(current->mm);
}

static void uvm_vm_close_semaphore_pool_entry(struct vm_area_struct *vma)
{
    UVM_ENTRY_VOID(uvm_vm_close_semaphore_pool(vma));
}

static struct vm_operations_struct uvm_vm_ops_semaphore_pool =
{
    .open         = uvm_vm_open_semaphore_pool_entry,
    .close        = uvm_vm_close_semaphore_pool_entry,

#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    .fault        = uvm_vm_fault_sigbus_wrapper_entry,
#else
    .fault        = uvm_vm_fault_sigbus_entry,
#endif
};

static int uvm_mmap(struct file *filp, struct vm_area_struct *vma)
{
    uvm_va_space_t *va_space;
    uvm_va_range_t *va_range;
    NV_STATUS status = uvm_global_get_status();
    int ret = 0;
    bool vma_wrapper_allocated = false;

    if (status != NV_OK)
        return -nv_status_to_errno(status);

    va_space = uvm_fd_va_space(filp);
    if (!va_space)
        return -EBADFD;

    // When the VA space is associated with an mm, all vmas under the VA space
    // must come from that mm.
    if (uvm_va_space_mm_enabled(va_space)) {
        UVM_ASSERT(va_space->va_space_mm.mm);
        if (va_space->va_space_mm.mm != current->mm)
            return -EOPNOTSUPP;
    }

    // UVM mappings are required to set offset == VA. This simplifies things
    // since we don't have to worry about address aliasing (except for fork,
    // handled separately) and it makes unmap_mapping_range simpler.
    if (vma->vm_start != (vma->vm_pgoff << PAGE_SHIFT)) {
        UVM_DBG_PRINT_RL("vm_start 0x%lx != vm_pgoff 0x%lx\n", vma->vm_start, vma->vm_pgoff << PAGE_SHIFT);
        return -EINVAL;
    }

    // Enforce shared read/writable mappings so we get all fault callbacks
    // without the kernel doing COW behind our backs. The user can still call
    // mprotect to change protections, but that will only hurt user space.
    if ((vma->vm_flags & (VM_SHARED|VM_READ|VM_WRITE)) !=
                         (VM_SHARED|VM_READ|VM_WRITE)) {
        UVM_DBG_PRINT_RL("User requested non-shared or non-writable mapping\n");
        return -EINVAL;
    }

    // If the PM lock cannot be acquired, disable the VMA and report success
    // to the caller.  The caller is expected to determine whether the
    // map operation succeeded via an ioctl() call.  This is necessary to
    // safely handle MAP_FIXED, which needs to complete atomically to prevent
    // the loss of the virtual address range.
    if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) {
        uvm_disable_vma(vma);
        return 0;
    }

    uvm_record_lock_mmap_lock_write(current->mm);

    // VM_MIXEDMAP      Required to use vm_insert_page
    //
    // VM_DONTEXPAND    mremap can grow a vma in place without giving us any
    //                  callback. We need to prevent this so our ranges stay
    //                  up-to-date with the vma. This flag doesn't prevent
    //                  mremap from moving the mapping elsewhere, nor from
    //                  shrinking it. We can detect both of those cases however
    //                  with vm_ops->open() and vm_ops->close() callbacks.
    //
    // Using VM_DONTCOPY would be nice, but madvise(MADV_DOFORK) can reset that
    // so we have to handle vm_open on fork anyway. We could disable MADV_DOFORK
    // with VM_IO, but that causes other mapping issues.
    // Make the default behavior be VM_DONTCOPY to avoid the performance impact
    // of removing CPU mappings in the parent on fork()+exec(). Users can call
    // madvise(MADV_DOFORK) if the child process requires access to the
    // allocation.
    nv_vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTCOPY);

    vma->vm_ops = &uvm_vm_ops_managed;

    // This identity assignment is needed so uvm_vm_open can find its parent vma
    vma->vm_private_data = uvm_vma_wrapper_alloc(vma);
    if (!vma->vm_private_data) {
        ret = -ENOMEM;
        goto out;
    }
    vma_wrapper_allocated = true;

    // The kernel has taken mmap_lock in write mode, but that doesn't prevent
    // this va_space from being modified by the GPU fault path or from the ioctl
    // path where we don't have this mm for sure, so we have to lock the VA
    // space directly.
    uvm_va_space_down_write(va_space);

    // uvm_va_range_create_mmap will catch collisions. Below are some example
    // cases which can cause collisions. There may be others.
    // 1) An overlapping range was previously created with an ioctl, for example
    //    for an external mapping.
    // 2) This file was passed to another process via a UNIX domain socket
    status = uvm_va_range_create_mmap(va_space, current->mm, vma->vm_private_data, NULL);

    if (status == NV_ERR_UVM_ADDRESS_IN_USE) {
        // If the mmap is for a semaphore pool, the VA range will have been
        // allocated by a previous ioctl, and the mmap just creates the CPU
        // mapping.
        va_range = uvm_va_range_find(va_space, vma->vm_start);
        if (va_range && va_range->node.start == vma->vm_start &&
                va_range->node.end + 1 == vma->vm_end &&
                va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL) {
            uvm_vma_wrapper_destroy(vma->vm_private_data);
            vma_wrapper_allocated = false;
            vma->vm_private_data = vma;
            vma->vm_ops = &uvm_vm_ops_semaphore_pool;
            status = uvm_mem_map_cpu_user(va_range->semaphore_pool.mem, va_range->va_space, vma);
        }
    }

    if (status != NV_OK) {
        UVM_DBG_PRINT_RL("Failed to create or map VA range for vma [0x%lx, 0x%lx): %s\n",
                         vma->vm_start, vma->vm_end, nvstatusToString(status));
        ret = -nv_status_to_errno(status);
    }

    uvm_va_space_up_write(va_space);

out:
    if (ret != 0 && vma_wrapper_allocated)
        uvm_vma_wrapper_destroy(vma->vm_private_data);

    uvm_record_unlock_mmap_lock_write(current->mm);

    uvm_up_read(&g_uvm_global.pm.lock);

    return ret;
}
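
// Illustrative user-space sketch (editor's note, assuming direct use of the
// character device rather than the CUDA runtime): the offset == VA rule above
// means a caller must pass the target virtual address as the file offset:
//
//     void *va = /* previously reserved region */;
//     void *p = mmap(va, size, PROT_READ | PROT_WRITE,
//                    MAP_SHARED | MAP_FIXED, uvm_fd, (off_t)(uintptr_t)va);
//
// Anything else fails the vm_start != (vm_pgoff << PAGE_SHIFT) check with
// -EINVAL, as do mappings that are not shared and read/writable.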

static int uvm_mmap_entry(struct file *filp, struct vm_area_struct *vma)
{
    UVM_ENTRY_RET(uvm_mmap(filp, vma));
}

static NV_STATUS uvm_api_initialize(UVM_INITIALIZE_PARAMS *params, struct file *filp)
{
    uvm_va_space_t *va_space;
    NV_STATUS status;
    uvm_fd_type_t old_fd_type;

    // Normally we expect private_data == UVM_FD_UNINITIALIZED. However multiple
    // threads may call this ioctl concurrently so we have to be careful to
    // avoid initializing multiple va_spaces and/or leaking memory. To do this
    // we do an atomic compare and swap. Only one thread will observe
    // UVM_FD_UNINITIALIZED and that thread will allocate and setup the
    // va_space.
    //
    // Other threads will either see UVM_FD_INITIALIZING or UVM_FD_VA_SPACE. In
    // the case of UVM_FD_VA_SPACE we return success if and only if the
    // initialization flags match. If another thread is still initializing the
    // va_space we return NV_ERR_BUSY_RETRY.
    //
    // If va_space initialization fails we return the failure code and reset the
    // FD state back to UVM_FD_UNINITIALIZED to allow another initialization
    // attempt to be made. This is safe because other threads will have only had
    // a chance to observe UVM_FD_INITIALIZING and not UVM_FD_VA_SPACE in this
    // case.
    old_fd_type = nv_atomic_long_cmpxchg((atomic_long_t *)&filp->private_data,
                                         UVM_FD_UNINITIALIZED, UVM_FD_INITIALIZING);
    old_fd_type &= UVM_FD_TYPE_MASK;
    if (old_fd_type == UVM_FD_UNINITIALIZED) {
        status = uvm_va_space_create(filp->f_mapping, &va_space, params->flags);
        if (status != NV_OK) {
            atomic_long_set_release((atomic_long_t *)&filp->private_data, UVM_FD_UNINITIALIZED);
            return status;
        }

        atomic_long_set_release((atomic_long_t *)&filp->private_data, (long)va_space | UVM_FD_VA_SPACE);
    }
    else if (old_fd_type == UVM_FD_VA_SPACE) {
        va_space = uvm_va_space_get(filp);

        if (params->flags != va_space->initialization_flags)
            status = NV_ERR_INVALID_ARGUMENT;
        else
            status = NV_OK;
    }
    else {
        UVM_ASSERT(old_fd_type == UVM_FD_INITIALIZING);
        status = NV_ERR_BUSY_RETRY;
    }

    return status;
}
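
// Illustrative user-space sketch (editor's note; the exact params layout is an
// assumption based on the public uvm_ioctl.h ABI): a fresh file descriptor
// must be initialized before any other UVM ioctl passes the init check in
// uvm_ioctl():
//
//     int fd = open("/dev/nvidia-uvm", O_RDWR | O_CLOEXEC);
//     UVM_INITIALIZE_PARAMS params = { 0 };
//     int err = ioctl(fd, UVM_INITIALIZE, &params);
//
// Repeating the call with the same flags succeeds, different flags return
// NV_ERR_INVALID_ARGUMENT, and racing a concurrent initialization returns
// NV_ERR_BUSY_RETRY, exactly as implemented above.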

static NV_STATUS uvm_api_pageable_mem_access(UVM_PAGEABLE_MEM_ACCESS_PARAMS *params, struct file *filp)
{
    uvm_va_space_t *va_space = uvm_va_space_get(filp);
    params->pageableMemAccess = uvm_va_space_pageable_mem_access_supported(va_space) ? NV_TRUE : NV_FALSE;
    return NV_OK;
}

static long uvm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    switch (cmd)
    {
        case UVM_DEINITIALIZE:
            return 0;

        UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_INITIALIZE,                  uvm_api_initialize);

        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_PAGEABLE_MEM_ACCESS,            uvm_api_pageable_mem_access);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_PAGEABLE_MEM_ACCESS_ON_GPU,     uvm_api_pageable_mem_access_on_gpu);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_REGISTER_GPU,                   uvm_api_register_gpu);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNREGISTER_GPU,                 uvm_api_unregister_gpu);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_CREATE_RANGE_GROUP,             uvm_api_create_range_group);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_DESTROY_RANGE_GROUP,            uvm_api_destroy_range_group);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_ENABLE_PEER_ACCESS,             uvm_api_enable_peer_access);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_DISABLE_PEER_ACCESS,            uvm_api_disable_peer_access);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_SET_RANGE_GROUP,                uvm_api_set_range_group);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_CREATE_EXTERNAL_RANGE,          uvm_api_create_external_range);
        UVM_ROUTE_CMD_ALLOC_INIT_CHECK(UVM_MAP_EXTERNAL_ALLOCATION,        uvm_api_map_external_allocation);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_MAP_EXTERNAL_SPARSE,            uvm_api_map_external_sparse);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_FREE,                           uvm_api_free);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_PREVENT_MIGRATION_RANGE_GROUPS, uvm_api_prevent_migration_range_groups);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_ALLOW_MIGRATION_RANGE_GROUPS,   uvm_api_allow_migration_range_groups);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_SET_PREFERRED_LOCATION,         uvm_api_set_preferred_location);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNSET_PREFERRED_LOCATION,       uvm_api_unset_preferred_location);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_SET_ACCESSED_BY,                uvm_api_set_accessed_by);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNSET_ACCESSED_BY,              uvm_api_unset_accessed_by);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_REGISTER_GPU_VASPACE,           uvm_api_register_gpu_va_space);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNREGISTER_GPU_VASPACE,         uvm_api_unregister_gpu_va_space);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_REGISTER_CHANNEL,               uvm_api_register_channel);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNREGISTER_CHANNEL,             uvm_api_unregister_channel);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_ENABLE_READ_DUPLICATION,        uvm_api_enable_read_duplication);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_DISABLE_READ_DUPLICATION,       uvm_api_disable_read_duplication);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_MIGRATE,                        uvm_api_migrate);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_ENABLE_SYSTEM_WIDE_ATOMICS,     uvm_api_enable_system_wide_atomics);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_DISABLE_SYSTEM_WIDE_ATOMICS,    uvm_api_disable_system_wide_atomics);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TOOLS_READ_PROCESS_MEMORY,      uvm_api_tools_read_process_memory);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TOOLS_WRITE_PROCESS_MEMORY,     uvm_api_tools_write_process_memory);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TOOLS_GET_PROCESSOR_UUID_TABLE, uvm_api_tools_get_processor_uuid_table);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_MAP_DYNAMIC_PARALLELISM_REGION, uvm_api_map_dynamic_parallelism_region);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNMAP_EXTERNAL,                 uvm_api_unmap_external);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_MIGRATE_RANGE_GROUP,            uvm_api_migrate_range_group);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TOOLS_FLUSH_EVENTS,             uvm_api_tools_flush_events);
        UVM_ROUTE_CMD_ALLOC_INIT_CHECK(UVM_ALLOC_SEMAPHORE_POOL,           uvm_api_alloc_semaphore_pool);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_CLEAN_UP_ZOMBIE_RESOURCES,      uvm_api_clean_up_zombie_resources);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_POPULATE_PAGEABLE,              uvm_api_populate_pageable);
        UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_VALIDATE_VA_RANGE,              uvm_api_validate_va_range);
    }

    // Try the test ioctls if none of the above matched
    return uvm_test_ioctl(filp, cmd, arg);
}

static long uvm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    long ret;

    if (!uvm_down_read_trylock(&g_uvm_global.pm.lock))
        return -EAGAIN;

    ret = uvm_ioctl(filp, cmd, arg);

    uvm_up_read(&g_uvm_global.pm.lock);

    uvm_thread_assert_all_unlocked();

    return ret;
}
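
// Note (editor's note): while the system is suspending, the PM path presumably
// holds pm.lock for write, so the trylock above fails and every ioctl returns
// -EAGAIN instead of blocking suspend. User space is expected to retry,
// conceptually:
//
//     do {
//         err = ioctl(fd, UVM_MIGRATE, &params);   // hypothetical request
//     } while (err == -1 && errno == EAGAIN);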

static long uvm_unlocked_ioctl_entry(struct file *filp, unsigned int cmd, unsigned long arg)
{
    UVM_ENTRY_RET(uvm_unlocked_ioctl(filp, cmd, arg));
}

static const struct file_operations uvm_fops =
{
    .open            = uvm_open_entry,
    .release         = uvm_release_entry,
    .mmap            = uvm_mmap_entry,
    .unlocked_ioctl  = uvm_unlocked_ioctl_entry,
#if NVCPU_IS_X86_64
    .compat_ioctl    = uvm_unlocked_ioctl_entry,
#endif
    .owner           = THIS_MODULE,
};

NV_STATUS uvm_test_register_unload_state_buffer(UVM_TEST_REGISTER_UNLOAD_STATE_BUFFER_PARAMS *params, struct file *filp)
{
    long ret;
    struct page *page;
    NV_STATUS status = NV_OK;

    if (!IS_ALIGNED(params->unload_state_buf, sizeof(NvU64)))
        return NV_ERR_INVALID_ADDRESS;

    // Hold mmap_lock to pin the user page. The UVM locking helper functions
    // are not used because unload_state_buf may be a managed memory pointer and
    // therefore a locking assertion from the CPU fault handler could be fired.
    nv_mmap_read_lock(current->mm);
    ret = NV_PIN_USER_PAGES(params->unload_state_buf, 1, FOLL_WRITE, &page, NULL);
    nv_mmap_read_unlock(current->mm);

    if (ret < 0)
        return errno_to_nv_status(ret);
    UVM_ASSERT(ret == 1);

    uvm_mutex_lock(&g_uvm_global.global_lock);

    if (g_uvm_global.unload_state.ptr) {
        NV_UNPIN_USER_PAGE(page);
        status = NV_ERR_IN_USE;
        goto error;
    }

    g_uvm_global.unload_state.page = page;
    g_uvm_global.unload_state.ptr = (NvU64 *)((char *)kmap(page) + (params->unload_state_buf & ~PAGE_MASK));
    *g_uvm_global.unload_state.ptr = 0;

error:
    uvm_mutex_unlock(&g_uvm_global.global_lock);

    return status;
}
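
// Note (editor's note): the pin + kmap pair above keeps a single user page
// resident and kernel-addressable for the remaining lifetime of the driver so
// that unload state can be reported even after the owning process exits. The
// teardown in uvm_test_unload_state_exit() mirrors it step for step:
//
//     ptr = (NvU64 *)((char *)kmap(page) + page_offset);   // map
//     ...
//     kunmap(page);                                        // unmap
//     NV_UNPIN_USER_PAGE(page);                            // drop the pin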

static void uvm_test_unload_state_exit(void)
{
    if (g_uvm_global.unload_state.ptr) {
        kunmap(g_uvm_global.unload_state.page);
        NV_UNPIN_USER_PAGE(g_uvm_global.unload_state.page);
    }
}

static int uvm_chardev_create(void)
{
    dev_t uvm_dev;

    int ret = alloc_chrdev_region(&g_uvm_base_dev,
                                  0,
                                  NVIDIA_UVM_NUM_MINOR_DEVICES,
                                  NVIDIA_UVM_DEVICE_NAME);
    if (ret != 0) {
        UVM_ERR_PRINT("alloc_chrdev_region failed: %d\n", ret);
        return ret;
    }
    uvm_dev = MKDEV(MAJOR(g_uvm_base_dev), NVIDIA_UVM_PRIMARY_MINOR_NUMBER);

    uvm_init_character_device(&g_uvm_cdev, &uvm_fops);
    ret = cdev_add(&g_uvm_cdev, uvm_dev, 1);
    if (ret != 0) {
        UVM_ERR_PRINT("cdev_add (major %u, minor %u) failed: %d\n", MAJOR(uvm_dev), MINOR(uvm_dev), ret);
        unregister_chrdev_region(g_uvm_base_dev, NVIDIA_UVM_NUM_MINOR_DEVICES);
        return ret;
    }

    return 0;
}
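
// Note (editor's note): alloc_chrdev_region() assigns a dynamic major, and
// only the primary minor is backed by this cdev; uvm_tools_init() is handed
// the same base dev_t and presumably registers its device on another minor of
// the region. The /dev/nvidia-uvm node itself is created by user space
// (typically nvidia-modprobe or udev), conceptually equivalent to:
//
//     mknod /dev/nvidia-uvm c <major from /proc/devices> 0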

static void uvm_chardev_exit(void)
{
    cdev_del(&g_uvm_cdev);
    unregister_chrdev_region(g_uvm_base_dev, NVIDIA_UVM_NUM_MINOR_DEVICES);
}

static int uvm_init(void)
{
    bool initialized_globals = false;
    bool added_device = false;
    int ret;

    NV_STATUS status = uvm_global_init();
    if (status != NV_OK) {
        UVM_ERR_PRINT("uvm_global_init() failed: %s\n", nvstatusToString(status));
        ret = -ENODEV;
        goto error;
    }
    initialized_globals = true;

    ret = uvm_chardev_create();
    if (ret != 0) {
        UVM_ERR_PRINT("uvm_chardev_create failed: %d\n", ret);
        goto error;
    }
    added_device = true;

    ret = uvm_tools_init(g_uvm_base_dev);
    if (ret != 0) {
        UVM_ERR_PRINT("uvm_tools_init() failed: %d\n", ret);
        goto error;
    }

    pr_info("Loaded the UVM driver, major device number %d.\n", MAJOR(g_uvm_base_dev));

    if (uvm_enable_builtin_tests)
        pr_info("Built-in UVM tests are enabled. This is a security risk.\n");

    // After Open RM is released, both the enclosing "#if" and this comment
    // block should be removed, because the uvm_hmm_is_enabled_system_wide()
    // check is both necessary and sufficient for reporting functionality.
    // Until that time, however, we need to avoid advertising UVM's ability to
    // enable HMM functionality.

    if (uvm_hmm_is_enabled_system_wide())
        UVM_INFO_PRINT("HMM (Heterogeneous Memory Management) is enabled in the UVM driver.\n");

    return 0;

error:
    if (added_device)
        uvm_chardev_exit();

    if (initialized_globals)
        uvm_global_exit();

    UVM_ERR_PRINT("uvm init failed: %d\n", ret);

    return ret;
}

static int __init uvm_init_entry(void)
{
    UVM_ENTRY_RET(uvm_init());
}

static void uvm_exit(void)
{
    uvm_tools_exit();
    uvm_chardev_exit();

    uvm_global_exit();

    uvm_test_unload_state_exit();

    pr_info("Unloaded the UVM driver.\n");
}

static void __exit uvm_exit_entry(void)
{
    UVM_ENTRY_VOID(uvm_exit());
}

module_init(uvm_init_entry);
module_exit(uvm_exit_entry);

MODULE_LICENSE("Dual MIT/GPL");
MODULE_INFO(supported, "external");
MODULE_VERSION(NV_VERSION_STRING);
1069