/*******************************************************************************
    Copyright (c) 2021 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

        The above copyright notice and this permission notice shall be
        included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
*******************************************************************************/

#include "uvm_linux.h"
#include "uvm_forward_decl.h"
#include "uvm_lock.h"
#include "uvm_global.h"
#include "uvm_va_space.h"
#include "uvm_gpu.h"

// Service ATS faults in the range [base, base + UVM_VA_BLOCK_SIZE) for the
// individual pages requested by the page masks set in
// ats_context->read_fault_mask and ats_context->write_fault_mask. base must be
// aligned to UVM_VA_BLOCK_SIZE. The caller is responsible for ensuring that
// the faulting addresses fall completely within the VMA, that they don't
// overlap a GMMU region (see uvm_ats_check_in_gmmu_region), and for handling
// any errors returned by this function (fault cancellations, etc.).
//
// Returns the fault service status in ats_context->faults_serviced_mask. In
// addition, for write faults whose corresponding bit is also set in
// read_fault_mask, ats_context->reads_serviced_mask indicates whether read
// servicing succeeded. These returned masks are only valid if the return
// status is NV_OK; any other status indicates a system-global fault servicing
// failure.
//
// LOCKING: The caller must retain and hold the mmap_lock and hold the va_space
// lock.
NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
                                 struct vm_area_struct *vma,
                                 NvU64 base,
                                 uvm_ats_fault_context_t *ats_context);
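
// Example (illustrative sketch only, not part of the interface above): a
// caller servicing a read fault and a write fault within the same block might
// do something like the following, assuming the usual uvm_page_mask_* helpers
// and hypothetical read_page_index/write_page_index values computed from the
// faulting addresses:
//
//     uvm_page_mask_zero(&ats_context->read_fault_mask);
//     uvm_page_mask_zero(&ats_context->write_fault_mask);
//     uvm_page_mask_set(&ats_context->read_fault_mask, read_page_index);
//     uvm_page_mask_set(&ats_context->write_fault_mask, write_page_index);
//
//     status = uvm_ats_service_faults(gpu_va_space, vma, base, ats_context);
//     if (status == NV_OK &&
//         !uvm_page_mask_test(&ats_context->faults_serviced_mask, write_page_index)) {
//         // The write fault was not serviced; the caller decides whether to
//         // cancel or retry it.
//     }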

// Service access counter notifications on ATS regions in the range
// [base, base + UVM_VA_BLOCK_SIZE) for the individual pages requested by the
// page mask set in ats_context->accessed_mask. base must be aligned to
// UVM_VA_BLOCK_SIZE. The caller is responsible for ensuring that the addresses
// in accessed_mask are completely covered by the VMA, and for handling any
// errors returned by this function.
//
// Returns NV_OK if servicing was successful. Any other status indicates an
// error while servicing the range.
//
// LOCKING: The caller must retain and hold the mmap_lock and hold the va_space
// lock.
NV_STATUS uvm_ats_service_access_counters(uvm_gpu_va_space_t *gpu_va_space,
                                          struct vm_area_struct *vma,
                                          NvU64 base,
                                          uvm_ats_fault_context_t *ats_context);
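
// Example (illustrative sketch only): servicing an access counter notification
// for a single page, assuming the usual uvm_page_mask_* helpers and a
// hypothetical page_index computed from the notification address:
//
//     uvm_page_mask_zero(&ats_context->accessed_mask);
//     uvm_page_mask_set(&ats_context->accessed_mask, page_index);
//     status = uvm_ats_service_access_counters(gpu_va_space, vma, base, ats_context);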

// Return whether there are any VA ranges (and thus GMMU mappings) within the
// UVM_GMMU_ATS_GRANULARITY-aligned region containing address.
bool uvm_ats_check_in_gmmu_region(uvm_va_space_t *va_space, NvU64 address, uvm_va_range_t *next);
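
// Example (illustrative sketch only): skipping ATS servicing for a fault whose
// address lands in a GMMU region, where va_range_next is assumed to have been
// obtained by the caller's VA range iteration:
//
//     if (uvm_ats_check_in_gmmu_region(va_space, fault_address, va_range_next)) {
//         // Don't service this fault through ATS; the address belongs to a
//         // GMMU region.
//     }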

// This function performs pending TLB invalidations for ATS and clears the
// ats_invalidate->tlb_batch_pending flag.
NV_STATUS uvm_ats_invalidate_tlbs(uvm_gpu_va_space_t *gpu_va_space,
                                  uvm_ats_fault_invalidate_t *ats_invalidate,
                                  uvm_tracker_t *out_tracker);
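
// Example (illustrative sketch only): issuing any pending ATS invalidations
// after a batch of faults has been serviced, assuming the caller keeps a
// uvm_ats_fault_invalidate_t alongside its fault servicing state and uses the
// uvm_tracker_* helpers sketched here:
//
//     uvm_tracker_t tracker = UVM_TRACKER_INIT();
//
//     status = uvm_ats_invalidate_tlbs(gpu_va_space, ats_invalidate, &tracker);
//     if (status == NV_OK)
//         status = uvm_tracker_wait(&tracker);
//
//     uvm_tracker_deinit(&tracker);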

static bool uvm_ats_can_service_faults(uvm_gpu_va_space_t *gpu_va_space, struct mm_struct *mm)
{
    if (mm)
        uvm_assert_mmap_lock_locked(mm);

    if (gpu_va_space->ats.enabled)
        UVM_ASSERT(g_uvm_global.ats.enabled);

    return gpu_va_space->ats.enabled && mm;
}
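
// Example (illustrative sketch only): using the check above as a gate before
// attempting ATS fault servicing, where mm is the mm_struct the caller has
// retained and locked:
//
//     if (uvm_ats_can_service_faults(gpu_va_space, mm)) {
//         status = uvm_ats_service_faults(gpu_va_space, vma, base, ats_context);
//     }
//     else {
//         // ATS servicing is not possible here; fall back to the caller's
//         // non-ATS handling or cancel the fault.
//     }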