/*******************************************************************************
    Copyright (c) 2016-2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

        The above copyright notice and this permission notice shall be
        included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/

#ifndef _UVM_HMM_H_
#define _UVM_HMM_H_

#include "nvtypes.h"
#include "uvm_forward_decl.h"
#include "uvm_va_block_types.h"
#include "uvm_va_policy.h"
#include "uvm_linux.h"
#include "uvm_range_tree.h"
#include "uvm_lock.h"

typedef struct
{
    // This stores pointers to uvm_va_block_t for HMM blocks.
    uvm_range_tree_t blocks;
    uvm_mutex_t blocks_lock;
} uvm_hmm_va_space_t;

#if UVM_IS_CONFIG_HMM()

    // Tells whether HMM is enabled for the given va_space.
    // If it is not enabled, all of the functions below are no-ops.
    bool uvm_hmm_is_enabled(uvm_va_space_t *va_space);

    // Return true if HMM is enabled system-wide.
    bool uvm_hmm_is_enabled_system_wide(void);

    // Initialize HMM for the given va_space.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be write locked
    // and the va_space lock must be held in write mode.
    NV_STATUS uvm_hmm_va_space_initialize(uvm_va_space_t *va_space);

    // Destroy any HMM state for the given va_space.
    // Locking: va_space lock must be held in write mode.
    void uvm_hmm_va_space_destroy(uvm_va_space_t *va_space);

    // Unmap all page tables in this VA space which map memory owned by this
    // GPU. Any memory still resident on this GPU will be evicted to system
    // memory. Note that 'mm' can be NULL (e.g., when closing the UVM file)
    // in which case any GPU memory is simply freed.
    // Locking: if mm is not NULL, the caller must hold mm->mmap_lock in at
    // least read mode and the va_space lock must be held in write mode.
    void uvm_hmm_unregister_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu, struct mm_struct *mm);

    // Destroy the VA space's mappings on the GPU, if it has any.
    // Locking: if mm is not NULL, the caller must hold mm->mmap_lock in at
    // least read mode and the va_space lock must be held in write mode.
    void uvm_hmm_remove_gpu_va_space(uvm_va_space_t *va_space,
                                     uvm_gpu_va_space_t *gpu_va_space,
                                     struct mm_struct *mm);

    // Find an existing HMM va_block.
    // This function can be called without having retained and locked the mm,
    // but in that case, the only allowed operations on the returned block are
    // locking the block, reading its state, and performing eviction. GPU fault
    // handling and user-initiated migrations are not allowed.
    // Return values are the same as uvm_va_block_find().
    // Locking: This must be called with va_space lock held in at least read
    // mode.
    NV_STATUS uvm_hmm_va_block_find(uvm_va_space_t *va_space,
                                    NvU64 addr,
                                    uvm_va_block_t **va_block_ptr);

    // Find an existing HMM va_block when processing a CPU fault and try to
    // isolate and lock the faulting page.
    // Return NV_ERR_INVALID_ADDRESS if the block is not found,
    // NV_ERR_BUSY_RETRY if the page could not be locked, and
    // NV_OK if the block is found and the page is locked. Also,
    // uvm_hmm_cpu_fault_finish() must be called if NV_OK is returned.
    // Locking: This must be called with the vma->vm_mm locked and the va_space
    // read locked.
    NV_STATUS uvm_hmm_va_block_cpu_find(uvm_va_space_t *va_space,
                                        uvm_service_block_context_t *service_context,
                                        struct vm_fault *vmf,
                                        uvm_va_block_t **va_block_ptr);

    // This must be called after uvm_va_block_cpu_fault() if
    // uvm_hmm_va_block_cpu_find() returns NV_OK.
    // Locking: This must be called with the vma->vm_mm locked and the va_space
    // read locked.
    void uvm_hmm_cpu_fault_finish(uvm_service_block_context_t *service_context);
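
    // For illustration, a rough sketch of how uvm_hmm_va_block_cpu_find() and
    // uvm_hmm_cpu_fault_finish() pair up in a CPU fault handler. The caller's
    // local variables are assumed and the actual fault servicing step is
    // elided; this is not a prescription of the handler's implementation.
    //
    //     status = uvm_hmm_va_block_cpu_find(va_space, service_context, vmf, &va_block);
    //     if (status == NV_OK) {
    //         // The faulting page is isolated and locked here.
    //         // ... service the fault on va_block via uvm_va_block_cpu_fault() ...
    //         uvm_hmm_cpu_fault_finish(service_context);
    //     }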
105 
106     // Find or create a new HMM va_block.
107     //
108     // Return NV_ERR_INVALID_ADDRESS if there is no VMA associated with the
109     // address 'addr' or the VMA does not have at least PROT_READ permission.
110     // The caller is also responsible for checking that there is no UVM
111     // va_range covering the given address before calling this function.
112     // If va_block_context is not NULL, the VMA is cached in
113     // va_block_context->hmm.vma.
114     // Locking: This function must be called with mm retained and locked for
115     // at least read and the va_space lock at least for read.
116     NV_STATUS uvm_hmm_va_block_find_create(uvm_va_space_t *va_space,
117                                            NvU64 addr,
118                                            uvm_va_block_context_t *va_block_context,
119                                            uvm_va_block_t **va_block_ptr);
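
    // A minimal usage sketch for uvm_hmm_va_block_find_create(). The caller's
    // local variables are assumed, and the locking described above is taken
    // by the caller:
    //
    //     status = uvm_hmm_va_block_find_create(va_space, addr, va_block_context, &va_block);
    //     if (status != NV_OK)
    //         return status;
    //
    //     // va_block_context->hmm.vma now caches the VMA covering addr.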

    // Find the VMA for the given address and set va_block_context->hmm.vma.
    // Return NV_ERR_INVALID_ADDRESS if va_block_context->mm is NULL or there
    // is no VMA associated with the address 'addr' or the VMA does not have at
    // least PROT_READ permission.
    // Locking: This function must be called with mm retained and locked for
    // at least read or mm equal to NULL.
    NV_STATUS uvm_hmm_find_vma(uvm_va_block_context_t *va_block_context, NvU64 addr);

    // If va_block is a HMM va_block, check that va_block_context->hmm.vma is
    // not NULL and covers the given region. This always returns true and is
    // intended to only be used with UVM_ASSERT().
    // Locking: This function must be called with the va_block lock held and if
    // va_block is a HMM block, va_block_context->mm must be retained and
    // locked for at least read.
    bool uvm_hmm_check_context_vma_is_valid(uvm_va_block_t *va_block,
                                            uvm_va_block_context_t *va_block_context,
                                            uvm_va_block_region_t region);
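
    // Since this check is assertion-only, a typical call looks like the
    // following (the caller's va_block_context and region variables are
    // assumed):
    //
    //     UVM_ASSERT(uvm_hmm_check_context_vma_is_valid(va_block, va_block_context, region));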

    // Find or create a HMM va_block and mark it so the next va_block split
    // will fail for testing purposes.
    // Locking: This function must be called with mm retained and locked for
    // at least read and the va_space lock at least for read.
    NV_STATUS uvm_hmm_test_va_block_inject_split_error(uvm_va_space_t *va_space, NvU64 addr);

    // Reclaim any HMM va_blocks that overlap the given range.
    // Note that 'end' is inclusive. If mm is NULL, any HMM va_block in the
    // range will be reclaimed, which assumes that the mm is being torn down
    // and was not retained.
    // Return values:
    // NV_ERR_NO_MEMORY: Reclaim required a block split, which failed.
    // NV_OK:            There were no HMM blocks in the range, or all HMM
    //                   blocks in the range were successfully reclaimed.
    // Locking: If mm is not NULL, it must equal va_space_mm.mm, the caller
    // must retain it with uvm_va_space_mm_or_current_retain() or be sure that
    // mm->mm_users is not zero, and it must be locked for at least read mode.
    // Also, the va_space lock must be held in write mode.
    // TODO: Bug 3372166: add asynchronous va_block reclaim.
    NV_STATUS uvm_hmm_va_block_reclaim(uvm_va_space_t *va_space,
                                       struct mm_struct *mm,
                                       NvU64 start,
                                       NvU64 end);

    // This is called to update the va_space tree of HMM va_blocks after an
    // existing va_block is split.
    // Locking: the va_space lock must be held in write mode.
    void uvm_hmm_va_block_split_tree(uvm_va_block_t *existing_va_block, uvm_va_block_t *new_block);

    // Find a HMM policy range that needs to be split. The callback function
    // 'split_needed_cb' returns true if the policy range needs to be split.
    // If a policy range is split, the existing range is truncated to
    // [existing_start, addr - 1] and a new policy node with the same policy
    // values as the existing node is created covering [addr, existing_end].
    // Before: [----------- existing ------------]
    // After:  [---- existing ----][---- new ----]
    //                             ^addr
    // Locking: the va_space must be write locked.
    NV_STATUS uvm_hmm_split_as_needed(uvm_va_space_t *va_space,
                                      NvU64 addr,
                                      uvm_va_policy_is_split_needed_t split_needed_cb,
                                      void *data);
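
    // As a sketch of the callback contract, a hypothetical split_needed_cb
    // that requests a split whenever the existing preferred location differs
    // from a given processor might look like the following. The exact
    // parameter list comes from uvm_va_policy_is_split_needed_t; the policy
    // field name and uvm_id_equal() are assumptions here:
    //
    //     static bool preferred_location_differs(const uvm_va_policy_t *policy, void *data)
    //     {
    //         uvm_processor_id_t id = *(uvm_processor_id_t *)data;
    //
    //         return !uvm_id_equal(policy->preferred_location, id);
    //     }
    //
    //     status = uvm_hmm_split_as_needed(va_space, addr, preferred_location_differs, &id);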

    // Set the preferred location policy for the given range.
    // Note that 'last_address' is inclusive.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be locked
    // and the va_space lock must be held in write mode.
    NV_STATUS uvm_hmm_set_preferred_location(uvm_va_space_t *va_space,
                                             uvm_processor_id_t preferred_location,
                                             NvU64 base,
                                             NvU64 last_address,
                                             uvm_tracker_t *out_tracker);

    // Set the accessed by policy for the given range. This also tries to
    // map the range. Note that 'last_address' is inclusive.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be locked
    // and the va_space lock must be held in write mode.
    NV_STATUS uvm_hmm_set_accessed_by(uvm_va_space_t *va_space,
                                      uvm_processor_id_t processor_id,
                                      bool set_bit,
                                      NvU64 base,
                                      NvU64 last_address,
                                      uvm_tracker_t *out_tracker);

    // Deferred work item to reestablish accessed by mappings after eviction. On
    // GPUs with access counters enabled, the evicted GPU will also get remote
    // mappings.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be locked
    // and the va_space lock must be held in at least read mode.
    void uvm_hmm_block_add_eviction_mappings(uvm_va_space_t *va_space,
                                             uvm_va_block_t *va_block,
                                             uvm_va_block_context_t *block_context);

    // Set the read duplication policy for the given range.
    // Note that 'last_address' is inclusive.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be write locked
    // and the va_space lock must be held in write mode.
    // TODO: Bug 3660922: need to implement HMM read duplication support.
    static NV_STATUS uvm_hmm_set_read_duplication(uvm_va_space_t *va_space,
                                                  uvm_read_duplication_policy_t new_policy,
                                                  NvU64 base,
                                                  NvU64 last_address)
    {
        if (!uvm_hmm_is_enabled(va_space))
            return NV_ERR_INVALID_ADDRESS;
        return NV_OK;
    }

    // This function assigns va_block_context->policy to the policy covering
    // the given address 'addr' and assigns the ending address '*endp' to the
    // minimum of va_block->end, va_block_context->hmm.vma->vm_end - 1, and the
    // ending address of the policy range. Note that va_block_context->hmm.vma
    // is expected to be initialized before calling this function.
    // Locking: This function must be called with
    // va_block_context->hmm.vma->vm_mm retained and locked for at least read
    // and the va_block lock held.
    void uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
                                 uvm_va_block_context_t *va_block_context,
                                 unsigned long addr,
                                 NvU64 *endp);
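
    // For illustration, a caller that walks a va_block in policy/VMA-bounded
    // chunks might look roughly like this. It is only a sketch and assumes
    // va_block_context->hmm.vma remains valid across the walked range:
    //
    //     unsigned long addr = va_block->start;
    //
    //     while (addr <= va_block->end) {
    //         NvU64 end;
    //
    //         uvm_hmm_find_policy_end(va_block, va_block_context, addr, &end);
    //         // ... operate on [addr, end] using va_block_context->policy ...
    //         addr = end + 1;
    //     }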

    // This function finds the VMA for the page index 'page_index' and assigns
    // it to va_block_context->vma, sets va_block_context->policy to the policy
    // covering the given address, and sets the ending page range '*outerp'
    // to the minimum of *outerp, va_block_context->hmm.vma->vm_end - 1, the
    // ending address of the policy range, and va_block->end.
    // Return NV_ERR_INVALID_ADDRESS if no VMA is found; otherwise, NV_OK.
    // Locking: This function must be called with
    // va_block_context->hmm.vma->vm_mm retained and locked for at least read
    // and the va_block lock held.
    NV_STATUS uvm_hmm_find_policy_vma_and_outer(uvm_va_block_t *va_block,
                                                uvm_va_block_context_t *va_block_context,
                                                uvm_page_index_t page_index,
                                                uvm_page_index_t *outerp);

    // Clear thrashing policy information from all HMM va_blocks.
    // Locking: va_space lock must be held in write mode.
    NV_STATUS uvm_hmm_clear_thrashing_policy(uvm_va_space_t *va_space);

    // Return the expanded region around 'address' limited to the intersection
    // of va_block start/end, vma start/end, and policy start/end.
    // va_block_context must not be NULL, va_block_context->hmm.vma must be
    // valid (this is usually set by uvm_hmm_va_block_find_create()), and
    // va_block_context->policy must be valid.
    // Locking: the caller must hold mm->mmap_lock in at least read mode, the
    // va_space lock must be held in at least read mode, and the va_block lock
    // held.
    uvm_va_block_region_t uvm_hmm_get_prefetch_region(uvm_va_block_t *va_block,
                                                      uvm_va_block_context_t *va_block_context,
                                                      NvU64 address);

    // Return the logical protection allowed for the page at the given address
    // within an HMM va_block.
    // va_block_context must not be NULL and va_block_context->hmm.vma must be
    // valid (this is usually set by uvm_hmm_va_block_find_create()).
    // Locking: the caller must hold va_block_context->mm mmap_lock in at least
    // read mode.
    uvm_prot_t uvm_hmm_compute_logical_prot(uvm_va_block_t *va_block,
                                            uvm_va_block_context_t *va_block_context,
                                            NvU64 addr);

    // This is called to service a GPU fault.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be locked,
    // the va_space read lock must be held, and the va_block lock held.
    NV_STATUS uvm_hmm_va_block_service_locked(uvm_processor_id_t processor_id,
                                              uvm_processor_id_t new_residency,
                                              uvm_va_block_t *va_block,
                                              uvm_va_block_retry_t *va_block_retry,
                                              uvm_service_block_context_t *service_context);

    // This is called to migrate a region within a HMM va_block.
    // va_block_context must not be NULL and va_block_context->policy and
    // va_block_context->hmm.vma must be valid.
    // Locking: the va_block_context->mm must be retained, mmap_lock must be
    // locked, and the va_block lock held.
    NV_STATUS uvm_hmm_va_block_migrate_locked(uvm_va_block_t *va_block,
                                              uvm_va_block_retry_t *va_block_retry,
                                              uvm_va_block_context_t *va_block_context,
                                              uvm_processor_id_t dest_id,
                                              uvm_va_block_region_t region,
                                              uvm_make_resident_cause_t cause);

    // This is called to migrate an address range of HMM allocations via
    // UvmMigrate().
    //
    // va_block_context must not be NULL. The caller is not required to set
    // va_block_context->policy or va_block_context->hmm.vma.
    //
    // Locking: the va_space->va_space_mm.mm mmap_lock must be locked and
    // the va_space read lock must be held.
    NV_STATUS uvm_hmm_migrate_ranges(uvm_va_space_t *va_space,
                                     uvm_va_block_context_t *va_block_context,
                                     NvU64 base,
                                     NvU64 length,
                                     uvm_processor_id_t dest_id,
                                     uvm_migrate_mode_t mode,
                                     uvm_tracker_t *out_tracker);
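
    // Illustrative call, as a sketch only: the migrate mode value and tracker
    // handling are assumptions, and the mmap_lock/va_space locking described
    // above is taken by the caller before this point:
    //
    //     status = uvm_hmm_migrate_ranges(va_space,
    //                                     va_block_context,
    //                                     base,
    //                                     length,
    //                                     dest_id,
    //                                     UVM_MIGRATE_MODE_MAKE_RESIDENT_AND_MAP,
    //                                     &local_tracker);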

    // This sets va_block_context->hmm.src_pfns[] to the ZONE_DEVICE private
    // PFNs for the GPU chunk memory.
    NV_STATUS uvm_hmm_va_block_evict_chunk_prep(uvm_va_block_t *va_block,
                                                uvm_va_block_context_t *va_block_context,
                                                uvm_gpu_chunk_t *gpu_chunk,
                                                uvm_va_block_region_t chunk_region);

    // Migrate pages to system memory for the given page mask.
    // Note that the mmap lock is not held and there is no MM retained.
    // This must be called after uvm_hmm_va_block_evict_chunk_prep() has
    // initialized va_block_context->hmm.src_pfns[] for the source GPU physical
    // PFNs being migrated. Note that the input mask 'pages_to_evict' can be
    // modified. If any of the evicted pages has the accessed by policy set,
    // then record that by setting out_accessed_by_set.
    // Locking: the va_block lock must be locked.
    NV_STATUS uvm_hmm_va_block_evict_chunks(uvm_va_block_t *va_block,
                                            uvm_va_block_context_t *va_block_context,
                                            const uvm_page_mask_t *pages_to_evict,
                                            uvm_va_block_region_t region,
                                            bool *out_accessed_by_set);
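
    // The expected eviction sequence, sketched. Iteration over the GPU chunks
    // backing the evicted pages is elided and the caller's local names are
    // assumed; this is illustrative only:
    //
    //     // For each GPU chunk backing pages in 'pages_to_evict':
    //     status = uvm_hmm_va_block_evict_chunk_prep(va_block, va_block_context,
    //                                                gpu_chunk, chunk_region);
    //
    //     // Then migrate the prepared pages to system memory:
    //     status = uvm_hmm_va_block_evict_chunks(va_block, va_block_context,
    //                                            pages_to_evict, region, &accessed_by_set);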

    // Migrate pages from the given GPU to system memory for the given page
    // mask and region. va_block_context must not be NULL.
    // Note that the mmap lock is not held and there is no MM retained.
    // Locking: the va_block lock must be locked.
    NV_STATUS uvm_hmm_va_block_evict_pages_from_gpu(uvm_va_block_t *va_block,
                                                    uvm_gpu_t *gpu,
                                                    uvm_va_block_context_t *va_block_context,
                                                    const uvm_page_mask_t *pages_to_evict,
                                                    uvm_va_block_region_t region);

    // Migrate a GPU chunk to system memory. This is called to remove CPU page
    // table references to device private struct pages for the given GPU after
    // all other references in va_blocks have been released and the GPU is
    // in the process of being removed/torn down. Note that there is no mm,
    // VMA, va_block or any user channel activity on this GPU.
    NV_STATUS uvm_hmm_pmm_gpu_evict_chunk(uvm_gpu_t *gpu,
                                          uvm_gpu_chunk_t *gpu_chunk);

    // This returns what would be the intersection of va_block start/end and
    // VMA start/end-1 for the given 'lookup_address' if
    // uvm_hmm_va_block_find_create() were called.
    // Locking: the caller must hold mm->mmap_lock in at least read mode and
    // the va_space lock must be held in at least read mode.
    NV_STATUS uvm_hmm_va_block_range_bounds(uvm_va_space_t *va_space,
                                            struct mm_struct *mm,
                                            NvU64 lookup_address,
                                            NvU64 *startp,
                                            NvU64 *endp,
                                            UVM_TEST_VA_RESIDENCY_INFO_PARAMS *params);

    // This updates the HMM va_block CPU residency information for a single
    // page at 'lookup_address' by calling hmm_range_fault(). If 'populate' is
    // true, the CPU page will be faulted in read/write or read-only
    // (depending on the permission of the underlying VMA at lookup_address).
    // Locking: the caller must hold mm->mmap_lock in at least read mode and
    // the va_space lock must be held in at least read mode.
    NV_STATUS uvm_hmm_va_block_update_residency_info(uvm_va_block_t *va_block,
                                                     struct mm_struct *mm,
                                                     NvU64 lookup_address,
                                                     bool populate);

    NV_STATUS uvm_test_split_invalidate_delay(UVM_TEST_SPLIT_INVALIDATE_DELAY_PARAMS *params,
                                              struct file *filp);

    NV_STATUS uvm_hmm_va_range_info(uvm_va_space_t *va_space,
                                    struct mm_struct *mm,
                                    UVM_TEST_VA_RANGE_INFO_PARAMS *params);

    // Return true if the new residency location for a GPU fault should be
    // system memory.
    // va_block_context must not be NULL and va_block_context->hmm.vma must be
    // valid (this is usually set by uvm_hmm_va_block_find_create()).
    // TODO: Bug 3660968: Remove this hack as soon as HMM migration is
    // implemented for VMAs other than anonymous memory.
    // Locking: the va_block lock must be held. If the va_block is a HMM
    // va_block, the va_block_context->mm must be retained and locked for at
    // least read.
    bool uvm_hmm_must_use_sysmem(uvm_va_block_t *va_block,
                                 uvm_va_block_context_t *va_block_context);

#else // UVM_IS_CONFIG_HMM()

    static bool uvm_hmm_is_enabled(uvm_va_space_t *va_space)
    {
        return false;
    }

    static bool uvm_hmm_is_enabled_system_wide(void)
    {
        return false;
    }

    static NV_STATUS uvm_hmm_va_space_initialize(uvm_va_space_t *va_space)
    {
        return NV_OK;
    }

    static void uvm_hmm_va_space_destroy(uvm_va_space_t *va_space)
    {
    }

    static void uvm_hmm_unregister_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu, struct mm_struct *mm)
    {
    }

    static void uvm_hmm_remove_gpu_va_space(uvm_va_space_t *va_space,
                                            uvm_gpu_va_space_t *gpu_va_space,
                                            struct mm_struct *mm)
    {
    }

    static NV_STATUS uvm_hmm_va_block_find(uvm_va_space_t *va_space,
                                           NvU64 addr,
                                           uvm_va_block_t **va_block_ptr)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_va_block_cpu_find(uvm_va_space_t *va_space,
                                               uvm_service_block_context_t *service_context,
                                               struct vm_fault *vmf,
                                               uvm_va_block_t **va_block_ptr)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static void uvm_hmm_cpu_fault_finish(uvm_service_block_context_t *service_context)
    {
    }

    static NV_STATUS uvm_hmm_va_block_find_create(uvm_va_space_t *va_space,
                                                  NvU64 addr,
                                                  uvm_va_block_context_t *va_block_context,
                                                  uvm_va_block_t **va_block_ptr)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_find_vma(uvm_va_block_context_t *va_block_context, NvU64 addr)
    {
        return NV_OK;
    }

    static bool uvm_hmm_check_context_vma_is_valid(uvm_va_block_t *va_block,
                                                   uvm_va_block_context_t *va_block_context,
                                                   uvm_va_block_region_t region)
    {
        return true;
    }

    static NV_STATUS uvm_hmm_test_va_block_inject_split_error(uvm_va_space_t *va_space, NvU64 addr)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_va_block_reclaim(uvm_va_space_t *va_space,
                                              struct mm_struct *mm,
                                              NvU64 start,
                                              NvU64 end)
    {
        return NV_OK;
    }

    static void uvm_hmm_va_block_split_tree(uvm_va_block_t *existing_va_block, uvm_va_block_t *new_block)
    {
    }

    static NV_STATUS uvm_hmm_split_as_needed(uvm_va_space_t *va_space,
                                             NvU64 addr,
                                             uvm_va_policy_is_split_needed_t split_needed_cb,
                                             void *data)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_set_preferred_location(uvm_va_space_t *va_space,
                                                    uvm_processor_id_t preferred_location,
                                                    NvU64 base,
                                                    NvU64 last_address,
                                                    uvm_tracker_t *out_tracker)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_set_accessed_by(uvm_va_space_t *va_space,
                                             uvm_processor_id_t processor_id,
                                             bool set_bit,
                                             NvU64 base,
                                             NvU64 last_address,
                                             uvm_tracker_t *out_tracker)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static void uvm_hmm_block_add_eviction_mappings(uvm_va_space_t *va_space,
                                                    uvm_va_block_t *va_block,
                                                    uvm_va_block_context_t *block_context)
    {
    }

    static NV_STATUS uvm_hmm_set_read_duplication(uvm_va_space_t *va_space,
                                                  uvm_read_duplication_policy_t new_policy,
                                                  NvU64 base,
                                                  NvU64 last_address)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static void uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
                                        uvm_va_block_context_t *va_block_context,
                                        unsigned long addr,
                                        NvU64 *endp)
    {
    }

    static NV_STATUS uvm_hmm_find_policy_vma_and_outer(uvm_va_block_t *va_block,
                                                       uvm_va_block_context_t *va_block_context,
                                                       uvm_page_index_t page_index,
                                                       uvm_page_index_t *outerp)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_clear_thrashing_policy(uvm_va_space_t *va_space)
    {
        return NV_OK;
    }

    static uvm_va_block_region_t uvm_hmm_get_prefetch_region(uvm_va_block_t *va_block,
                                                             uvm_va_block_context_t *va_block_context,
                                                             NvU64 address)
    {
        return (uvm_va_block_region_t){};
    }

    static uvm_prot_t uvm_hmm_compute_logical_prot(uvm_va_block_t *va_block,
                                                   uvm_va_block_context_t *va_block_context,
                                                   NvU64 addr)
    {
        return UVM_PROT_NONE;
    }

    static NV_STATUS uvm_hmm_va_block_service_locked(uvm_processor_id_t processor_id,
                                                     uvm_processor_id_t new_residency,
                                                     uvm_va_block_t *va_block,
                                                     uvm_va_block_retry_t *va_block_retry,
                                                     uvm_service_block_context_t *service_context)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_va_block_migrate_locked(uvm_va_block_t *va_block,
                                                     uvm_va_block_retry_t *va_block_retry,
                                                     uvm_va_block_context_t *va_block_context,
                                                     uvm_processor_id_t dest_id,
                                                     uvm_va_block_region_t region,
                                                     uvm_make_resident_cause_t cause)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_migrate_ranges(uvm_va_space_t *va_space,
                                            uvm_va_block_context_t *va_block_context,
                                            NvU64 base,
                                            NvU64 length,
                                            uvm_processor_id_t dest_id,
                                            uvm_migrate_mode_t mode,
                                            uvm_tracker_t *out_tracker)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_va_block_evict_chunk_prep(uvm_va_block_t *va_block,
                                                       uvm_va_block_context_t *va_block_context,
                                                       uvm_gpu_chunk_t *gpu_chunk,
                                                       uvm_va_block_region_t chunk_region)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_va_block_evict_chunks(uvm_va_block_t *va_block,
                                                   uvm_va_block_context_t *va_block_context,
                                                   const uvm_page_mask_t *pages_to_evict,
                                                   uvm_va_block_region_t region,
                                                   bool *out_accessed_by_set)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_va_block_evict_pages_from_gpu(uvm_va_block_t *va_block,
                                                           uvm_gpu_t *gpu,
                                                           uvm_va_block_context_t *va_block_context,
                                                           const uvm_page_mask_t *pages_to_evict,
                                                           uvm_va_block_region_t region)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_pmm_gpu_evict_chunk(uvm_gpu_t *gpu,
                                                 uvm_gpu_chunk_t *gpu_chunk)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_va_block_range_bounds(uvm_va_space_t *va_space,
                                                   struct mm_struct *mm,
                                                   NvU64 lookup_address,
                                                   NvU64 *startp,
                                                   NvU64 *endp,
                                                   UVM_TEST_VA_RESIDENCY_INFO_PARAMS *params)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_va_block_update_residency_info(uvm_va_block_t *va_block,
                                                            struct mm_struct *mm,
                                                            NvU64 lookup_address,
                                                            bool populate)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_test_split_invalidate_delay(UVM_TEST_SPLIT_INVALIDATE_DELAY_PARAMS *params,
                                                     struct file *filp)
    {
        return NV_ERR_INVALID_STATE;
    }

    static NV_STATUS uvm_hmm_va_range_info(uvm_va_space_t *va_space,
                                           struct mm_struct *mm,
                                           UVM_TEST_VA_RANGE_INFO_PARAMS *params)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static bool uvm_hmm_must_use_sysmem(uvm_va_block_t *va_block,
                                        uvm_va_block_context_t *va_block_context)
    {
        return false;
    }

#endif // UVM_IS_CONFIG_HMM()

#endif  // _UVM_HMM_H_