/*******************************************************************************
    Copyright (c) 2016-2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

        The above copyright notice and this permission notice shall be
        included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/

#ifndef _UVM_HMM_H_
#define _UVM_HMM_H_

#include "nvtypes.h"
#include "uvm_forward_decl.h"
#include "uvm_va_block_types.h"
#include "uvm_va_policy.h"
#include "uvm_linux.h"
#include "uvm_range_tree.h"
#include "uvm_lock.h"

typedef struct
{
    // This stores pointers to uvm_va_block_t for HMM blocks.
    uvm_range_tree_t blocks;
    uvm_mutex_t blocks_lock;
} uvm_hmm_va_space_t;

#if UVM_IS_CONFIG_HMM()

    // Returns true if HMM is enabled for the given va_space.
    // If it is not enabled, all of the functions below are no-ops.
    bool uvm_hmm_is_enabled(uvm_va_space_t *va_space);

    // Returns true if HMM is enabled system-wide.
    bool uvm_hmm_is_enabled_system_wide(void);

    // Initialize HMM for the given va_space.
    void uvm_hmm_va_space_initialize(uvm_va_space_t *va_space);

    // Destroy any HMM state for the given va_space.
    // Locking: va_space lock must be held in write mode.
    void uvm_hmm_va_space_destroy(uvm_va_space_t *va_space);

    // Unmap all page tables in this VA space which map memory owned by this
    // GPU. Any memory still resident on this GPU will be evicted to system
    // memory. Note that 'mm' can be NULL (e.g., when closing the UVM file),
    // in which case any GPU memory is simply freed.
    // Locking: if mm is not NULL, the caller must hold mm->mmap_lock in at
    // least read mode and the va_space lock must be held in write mode.
    void uvm_hmm_unregister_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu, struct mm_struct *mm);

    // Destroy the VA space's mappings on the GPU, if it has any.
    // Locking: if mm is not NULL, the caller must hold mm->mmap_lock in at
    // least read mode and the va_space lock must be held in write mode.
    void uvm_hmm_remove_gpu_va_space(uvm_va_space_t *va_space,
                                     uvm_gpu_va_space_t *gpu_va_space,
                                     struct mm_struct *mm);

    // Find an existing HMM va_block.
    // This function can be called without having retained and locked the mm,
    // but in that case, the only allowed operations on the returned block are
    // locking the block, reading its state, and performing eviction. GPU fault
    // handling and user-initiated migrations are not allowed.
    // Return values are the same as uvm_va_block_find().
    // Locking: This function must be called with the va_space lock held in at
    // least read mode.
    NV_STATUS uvm_hmm_va_block_find(uvm_va_space_t *va_space,
                                    NvU64 addr,
                                    uvm_va_block_t **va_block_ptr);
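
    // Illustrative sketch (not part of this header): looking up a block
    // without a retained mm, e.g. from an eviction path. The 'lock' member
    // name on uvm_va_block_t is an assumption used here for illustration:
    //
    //     uvm_va_block_t *va_block;
    //     NV_STATUS status = uvm_hmm_va_block_find(va_space, addr, &va_block);
    //
    //     if (status == NV_OK) {
    //         uvm_mutex_lock(&va_block->lock);
    //         // Only read block state or perform eviction here; GPU fault
    //         // handling and user-initiated migration require the mm.
    //         uvm_mutex_unlock(&va_block->lock);
    //     }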

    // Find or create a new HMM va_block.
    //
    // Return NV_ERR_INVALID_ADDRESS if there is no VMA associated with the
    // address 'addr' or the VMA does not have at least PROT_READ permission.
    // The caller is also responsible for checking that there is no UVM
    // va_range covering the given address before calling this function.
    // If vma_out is not NULL, the VMA covering 'addr' is returned in it.
    // Locking: This function must be called with mm retained and locked for
    // at least read and the va_space lock held in at least read mode.
    NV_STATUS uvm_hmm_va_block_find_create(uvm_va_space_t *va_space,
                                           NvU64 addr,
                                           struct vm_area_struct **vma_out,
                                           uvm_va_block_t **va_block_ptr);
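
    // Illustrative sketch (not part of this header): a fault or migration path
    // would typically create the block, then take its lock before operating on
    // it. The 'lock' member name is an assumption used for illustration:
    //
    //     struct vm_area_struct *vma;
    //     uvm_va_block_t *va_block;
    //     NV_STATUS status;
    //
    //     status = uvm_hmm_va_block_find_create(va_space, addr, &vma, &va_block);
    //     if (status != NV_OK)
    //         return status;  // e.g., NV_ERR_INVALID_ADDRESS: no suitable VMA
    //
    //     uvm_mutex_lock(&va_block->lock);
    //     // ... service the fault or start the migration ...
    //     uvm_mutex_unlock(&va_block->lock);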

    // Find the VMA for the given address and return it in vma_out. Return
    // NV_ERR_INVALID_ADDRESS if mm is NULL, if there is no VMA associated with
    // the address 'addr', or if the VMA does not have at least PROT_READ
    // permission.
    // Locking: This function must be called with mm retained and locked for
    // at least read, or with mm equal to NULL.
    NV_STATUS uvm_hmm_find_vma(struct mm_struct *mm, struct vm_area_struct **vma_out, NvU64 addr);

    // If va_block is a HMM va_block, check that vma is not NULL and covers the
    // given region. This always returns true and is intended to be used only
    // with UVM_ASSERT().
    // Locking: This function must be called with the va_block lock held and,
    // if va_block is a HMM block, va_space->va_space_mm.mm->mmap_lock must be
    // retained and locked for at least read.
    bool uvm_hmm_check_context_vma_is_valid(uvm_va_block_t *va_block,
                                            struct vm_area_struct *vma,
                                            uvm_va_block_region_t region);
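
    // Typical use is inside an assertion, for example:
    //
    //     UVM_ASSERT(uvm_hmm_check_context_vma_is_valid(va_block, vma, region));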

    // Initialize the HMM portion of the service_context.
    // This should be called once before any retry loops that call
    // uvm_va_block_service_locked().
    void uvm_hmm_service_context_init(uvm_service_block_context_t *service_context);

    // Begin a migration critical section. When calling into the kernel it is
    // sometimes necessary to drop the va_block lock. This function returns
    // NV_OK when no other thread has started a migration critical section.
    // Otherwise, it returns NV_ERR_BUSY_RETRY and threads should then retry
    // this function to begin a critical section.
    // Locking: va_block lock must not be held.
    NV_STATUS uvm_hmm_migrate_begin(uvm_va_block_t *va_block);

    // Same as uvm_hmm_migrate_begin() but waits if required before beginning a
    // critical section.
    void uvm_hmm_migrate_begin_wait(uvm_va_block_t *va_block);

    // Finish a migration critical section.
    void uvm_hmm_migrate_finish(uvm_va_block_t *va_block);
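
    // A minimal usage sketch of the critical section API above (illustrative
    // only):
    //
    //     NV_STATUS status = uvm_hmm_migrate_begin(va_block);
    //
    //     if (status == NV_ERR_BUSY_RETRY) {
    //         // Either retry uvm_hmm_migrate_begin() in a loop or simply
    //         // block until the critical section can be entered:
    //         uvm_hmm_migrate_begin_wait(va_block);
    //     }
    //
    //     // ... migrate, possibly dropping and re-taking the va_block lock ...
    //
    //     uvm_hmm_migrate_finish(va_block);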

    // Find or create a HMM va_block and mark it so the next va_block split
    // will fail for testing purposes.
    // Locking: This function must be called with mm retained and locked for
    // at least read and the va_space lock held in at least read mode.
    NV_STATUS uvm_hmm_test_va_block_inject_split_error(uvm_va_space_t *va_space, NvU64 addr);

    // Reclaim any HMM va_blocks that overlap the given range.
    // Note that 'end' is inclusive. If mm is NULL, any HMM va_block in the
    // range will be reclaimed, which assumes that the mm is being torn down
    // and was not retained.
    // Return values:
    // NV_ERR_NO_MEMORY: Reclaim required a block split, which failed.
    // NV_OK:            There were no HMM blocks in the range, or all HMM
    //                   blocks in the range were successfully reclaimed.
    // Locking: If mm is not NULL, it must equal va_space_mm.mm, the caller
    // must retain it with uvm_va_space_mm_or_current_retain() or be sure that
    // mm->mm_users is not zero, and it must be locked for at least read mode.
    // Also, the va_space lock must be held in write mode.
    // TODO: Bug 3372166: add asynchronous va_block reclaim.
    NV_STATUS uvm_hmm_va_block_reclaim(uvm_va_space_t *va_space,
                                       struct mm_struct *mm,
                                       NvU64 start,
                                       NvU64 end);
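
    // Illustrative sketch (not part of this header): a hypothetical caller
    // reclaiming HMM blocks before creating a UVM va_range over [start, end]:
    //
    //     status = uvm_hmm_va_block_reclaim(va_space, mm, start, end);
    //     if (status != NV_OK)
    //         return status;  // NV_ERR_NO_MEMORY if a required block split failed
    //
    //     // ... safe to create the va_range covering [start, end] ...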

    // This is called to update the va_space tree of HMM va_blocks after an
    // existing va_block is split.
    // Locking: the va_space lock must be held in write mode.
    void uvm_hmm_va_block_split_tree(uvm_va_block_t *existing_va_block, uvm_va_block_t *new_block);

    // Find a HMM policy range that needs to be split. The callback function
    // 'split_needed_cb' returns true if the policy range needs to be split.
    // If a policy range is split, the existing range is truncated to
    // [existing_start, addr - 1] and a new policy node with the same policy
    // values as the existing node is created covering [addr, existing_end].
    // Before: [----------- existing ------------]
    // After:  [---- existing ----][---- new ----]
    //                             ^addr
    // Locking: the va_space must be write locked.
    NV_STATUS uvm_hmm_split_as_needed(uvm_va_space_t *va_space,
                                      NvU64 addr,
                                      uvm_va_policy_is_split_needed_t split_needed_cb,
                                      void *data);
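
    // Illustrative sketch (not part of this header) of a split_needed_cb. The
    // parameter list and the policy field access below are assumptions for
    // illustration; the actual uvm_va_policy_is_split_needed_t signature is
    // defined in uvm_va_policy.h:
    //
    //     static bool preferred_location_is_split_needed(const uvm_va_policy_t *policy,
    //                                                    void *data)
    //     {
    //         uvm_processor_id_t *new_location = data;
    //
    //         // Split only where the policy value would actually change.
    //         return !uvm_id_equal(policy->preferred_location, *new_location);
    //     }
    //
    //     status = uvm_hmm_split_as_needed(va_space,
    //                                      addr,
    //                                      preferred_location_is_split_needed,
    //                                      &preferred_location);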

    // Set the preferred location policy for the given range.
    // Note that 'last_address' is inclusive.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be locked
    // and the va_space lock must be held in write mode.
    NV_STATUS uvm_hmm_set_preferred_location(uvm_va_space_t *va_space,
                                             uvm_processor_id_t preferred_location,
                                             NvU64 base,
                                             NvU64 last_address,
                                             uvm_tracker_t *out_tracker);

    // Set the accessed by policy for the given range. This also tries to
    // map the range. Note that 'last_address' is inclusive.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be locked
    // and the va_space lock must be held in write mode.
    NV_STATUS uvm_hmm_set_accessed_by(uvm_va_space_t *va_space,
                                      uvm_processor_id_t processor_id,
                                      bool set_bit,
                                      NvU64 base,
                                      NvU64 last_address,
                                      uvm_tracker_t *out_tracker);

    // Deferred work item to reestablish accessed by mappings after eviction.
    // On GPUs with access counters enabled, the evicted GPU will also get
    // remote mappings.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be locked
    // and the va_space lock must be held in at least read mode.
    void uvm_hmm_block_add_eviction_mappings(uvm_va_space_t *va_space,
                                             uvm_va_block_t *va_block,
                                             uvm_va_block_context_t *block_context);

    // Set the read duplication policy for the given range.
    // Note that 'last_address' is inclusive.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be write locked
    // and the va_space lock must be held in write mode.
    // TODO: Bug 3660922: need to implement HMM read duplication support.
    static NV_STATUS uvm_hmm_set_read_duplication(uvm_va_space_t *va_space,
                                                  uvm_read_duplication_policy_t new_policy,
                                                  NvU64 base,
                                                  NvU64 last_address)
    {
        if (!uvm_hmm_is_enabled(va_space))
            return NV_ERR_INVALID_ADDRESS;
        return NV_OK;
    }

    // This function returns the policy covering the given address 'addr' and
    // assigns the ending address '*endp' to the minimum of va_block->end,
    // vma->vm_end - 1, and the ending address of the policy range.
    // Locking: This function must be called with vma->vm_mm retained and
    // locked for at least read and with the va_block and va_space locks held.
    const uvm_va_policy_t *uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
                                                   struct vm_area_struct *vma,
                                                   unsigned long addr,
                                                   NvU64 *endp);
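
    // Illustrative sketch (not part of this header): walking the policies that
    // apply to [start, va_block->end] one policy range at a time. The 'end'
    // member name on uvm_va_block_t is an assumption used for illustration:
    //
    //     NvU64 addr = start;
    //
    //     while (addr <= va_block->end) {
    //         NvU64 end;
    //         const uvm_va_policy_t *policy;
    //
    //         policy = uvm_hmm_find_policy_end(va_block, vma, addr, &end);
    //         // ... apply 'policy' to the range [addr, end] ...
    //         addr = end + 1;
    //     }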

    // This function finds the VMA for the page index 'page_index' and returns
    // it in '*vma', which must not be NULL. It returns the policy covering the
    // given address and sets the ending page range '*outerp' to the minimum of
    // *outerp, vma->vm_end - 1, the ending address of the policy range, and
    // va_block->end.
    // Return NV_ERR_INVALID_ADDRESS if no VMA is found; otherwise sets *vma
    // and returns NV_OK.
    // Locking: This function must be called with mm retained and locked for at
    // least read and with the va_block and va_space locks held.
    NV_STATUS uvm_hmm_find_policy_vma_and_outer(uvm_va_block_t *va_block,
                                                struct vm_area_struct **vma,
                                                uvm_page_index_t page_index,
                                                const uvm_va_policy_t **policy,
                                                uvm_page_index_t *outerp);

    // Clear thrashing policy information from all HMM va_blocks.
    // Locking: va_space lock must be held in write mode.
    NV_STATUS uvm_hmm_clear_thrashing_policy(uvm_va_space_t *va_space);

    // Return the expanded region around 'address' limited to the intersection
    // of va_block start/end, vma start/end, and policy start/end.
    // Locking: the caller must hold va_space->va_space_mm.mm->mmap_lock in at
    // least read mode, the va_space lock in at least read mode, and the
    // va_block lock.
    uvm_va_block_region_t uvm_hmm_get_prefetch_region(uvm_va_block_t *va_block,
                                                      struct vm_area_struct *vma,
                                                      const uvm_va_policy_t *policy,
                                                      NvU64 address);

    // Return the logical protection allowed for the page at the given address
    // within the vma, which must be valid (it is usually obtained from
    // uvm_hmm_va_block_find_create()).
    // Locking: the caller must hold va_space->va_space_mm.mm mmap_lock in at
    // least read mode.
    uvm_prot_t uvm_hmm_compute_logical_prot(uvm_va_block_t *va_block,
                                            struct vm_area_struct *vma,
                                            NvU64 addr);
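
    // Illustrative sketch (not part of this header): rejecting an access the
    // VMA does not permit (the error code choice is illustrative only):
    //
    //     uvm_prot_t logical_prot = uvm_hmm_compute_logical_prot(va_block, vma, addr);
    //
    //     if (logical_prot == UVM_PROT_NONE)
    //         return NV_ERR_INVALID_ADDRESS;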

    // This is called to service a GPU fault.
    // Locking: the va_space->va_space_mm.mm mmap_lock must be locked,
    // the va_space read lock must be held, and the va_block lock held.
    NV_STATUS uvm_hmm_va_block_service_locked(uvm_processor_id_t processor_id,
                                              uvm_processor_id_t new_residency,
                                              uvm_va_block_t *va_block,
                                              uvm_va_block_retry_t *va_block_retry,
                                              uvm_service_block_context_t *service_context);
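
    // Illustrative sketch (not part of this header) of the intended pairing
    // with uvm_hmm_service_context_init() above. Treating
    // NV_ERR_MORE_PROCESSING_REQUIRED as the "retry" status is an assumption
    // borrowed from the general va_block retry pattern; real callers also
    // cycle the va_block lock between attempts:
    //
    //     uvm_hmm_service_context_init(service_context);
    //
    //     do {
    //         status = uvm_hmm_va_block_service_locked(processor_id,
    //                                                  new_residency,
    //                                                  va_block,
    //                                                  &va_block_retry,
    //                                                  service_context);
    //     } while (status == NV_ERR_MORE_PROCESSING_REQUIRED);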

    // This is called to migrate a region within a HMM va_block.
    // va_block_context must not be NULL and va_block_context->hmm.vma
    // must be valid.
    // Locking: the va_space->va_space_mm.mm must be retained, mmap_lock must be
    // locked, and the va_block lock held.
    NV_STATUS uvm_hmm_va_block_migrate_locked(uvm_va_block_t *va_block,
                                              uvm_va_block_retry_t *va_block_retry,
                                              uvm_va_block_context_t *va_block_context,
                                              uvm_processor_id_t dest_id,
                                              uvm_va_block_region_t region,
                                              uvm_make_resident_cause_t cause);

    // This is called to migrate an address range of HMM allocations via
    // UvmMigrate().
    //
    // va_block_context must not be NULL. The caller is not required to set
    // va_block_context->hmm.vma.
    //
    // Locking: the va_space->va_space_mm.mm mmap_lock must be locked and
    // the va_space read lock must be held.
    NV_STATUS uvm_hmm_migrate_ranges(uvm_va_space_t *va_space,
                                     uvm_va_block_context_t *va_block_context,
                                     NvU64 base,
                                     NvU64 length,
                                     uvm_processor_id_t dest_id,
                                     uvm_migrate_mode_t mode,
                                     uvm_tracker_t *out_tracker);

    // Evicts all va_blocks in the va_space to the CPU. Unlike the other
    // va_block eviction functions, this is based on virtual address and
    // therefore takes mmap_lock for read.
    void uvm_hmm_evict_va_blocks(uvm_va_space_t *va_space);

    // This sets va_block_context->hmm.src_pfns[] to the ZONE_DEVICE private
    // PFN for the GPU chunk memory.
    NV_STATUS uvm_hmm_va_block_evict_chunk_prep(uvm_va_block_t *va_block,
                                                uvm_va_block_context_t *va_block_context,
                                                uvm_gpu_chunk_t *gpu_chunk,
                                                uvm_va_block_region_t chunk_region);

    // Migrate pages to system memory for the given page mask.
    // Note that the mmap_lock is not held and there is no mm retained.
    // This must be called after uvm_hmm_va_block_evict_chunk_prep() has
    // initialized va_block_context->hmm.src_pfns[] for the source GPU physical
    // PFNs being migrated. Note that the input mask 'pages_to_evict' can be
    // modified. If any of the evicted pages has the accessed by policy set,
    // then record that by setting out_accessed_by_set.
    // Locking: the va_block lock must be held.
    NV_STATUS uvm_hmm_va_block_evict_chunks(uvm_va_block_t *va_block,
                                            uvm_va_block_context_t *va_block_context,
                                            const uvm_page_mask_t *pages_to_evict,
                                            uvm_va_block_region_t region,
                                            bool *out_accessed_by_set);
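
    // Illustrative sketch (not part of this header) of the required ordering:
    // prepare the source PFNs for each GPU chunk backing the region, then
    // evict. Loop structure and variable names are illustrative only:
    //
    //     bool accessed_by_set = false;
    //
    //     // For each gpu_chunk backing a chunk_region within 'region':
    //     status = uvm_hmm_va_block_evict_chunk_prep(va_block,
    //                                                va_block_context,
    //                                                gpu_chunk,
    //                                                chunk_region);
    //     if (status != NV_OK)
    //         return status;
    //
    //     status = uvm_hmm_va_block_evict_chunks(va_block,
    //                                            va_block_context,
    //                                            pages_to_evict,
    //                                            region,
    //                                            &accessed_by_set);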

    // Migrate pages from the given GPU to system memory for the given page
    // mask and region. va_block_context must not be NULL.
    // Note that the mmap_lock is not held and there is no mm retained.
    // Locking: the va_block lock must be held.
    NV_STATUS uvm_hmm_va_block_evict_pages_from_gpu(uvm_va_block_t *va_block,
                                                    uvm_gpu_t *gpu,
                                                    uvm_va_block_context_t *va_block_context,
                                                    const uvm_page_mask_t *pages_to_evict,
                                                    uvm_va_block_region_t region);

    // Migrate a GPU device-private page to system memory. This is
    // called to remove CPU page table references to device private
    // struct pages for the given GPU after all other references in
    // va_blocks have been released and the GPU is in the process of
    // being removed/torn down. Note that there is no mm, VMA,
    // va_block or any user channel activity on this GPU.
    NV_STATUS uvm_hmm_pmm_gpu_evict_pfn(unsigned long pfn);

    // This returns what would be the intersection of va_block start/end and
    // VMA start/end - 1 for the given 'lookup_address' if
    // uvm_hmm_va_block_find_create() were called.
    // Locking: the caller must hold mm->mmap_lock in at least read mode and
    // the va_space lock must be held in at least read mode.
    NV_STATUS uvm_hmm_va_block_range_bounds(uvm_va_space_t *va_space,
                                            struct mm_struct *mm,
                                            NvU64 lookup_address,
                                            NvU64 *startp,
                                            NvU64 *endp,
                                            UVM_TEST_VA_RESIDENCY_INFO_PARAMS *params);

    // This updates the HMM va_block CPU residency information for a single
    // page at 'lookup_address' by calling hmm_range_fault(). If 'populate' is
    // true, the CPU page will be faulted in read/write or read-only
    // (depending on the permission of the underlying VMA at lookup_address).
    // Locking: the caller must hold mm->mmap_lock in at least read mode and
    // the va_space lock must be held in at least read mode.
    NV_STATUS uvm_hmm_va_block_update_residency_info(uvm_va_block_t *va_block,
                                                     struct mm_struct *mm,
                                                     NvU64 lookup_address,
                                                     bool populate);

    NV_STATUS uvm_test_split_invalidate_delay(UVM_TEST_SPLIT_INVALIDATE_DELAY_PARAMS *params,
                                              struct file *filp);

    NV_STATUS uvm_hmm_va_range_info(uvm_va_space_t *va_space,
                                    struct mm_struct *mm,
                                    UVM_TEST_VA_RANGE_INFO_PARAMS *params);

    // Return true if the new residency location for a GPU fault should be
    // system memory.
    // va_block_context must not be NULL and va_block_context->hmm.vma must be
    // valid (this is usually set by uvm_hmm_va_block_find_create()).
    // TODO: Bug 3660968: Remove this hack as soon as HMM migration is
    // implemented for VMAs other than anonymous memory.
    // Locking: the va_block lock must be held. If the va_block is a HMM
    // va_block, va_block_context->mm must be retained and locked for at least
    // read.
    bool uvm_hmm_must_use_sysmem(uvm_va_block_t *va_block,
                                 uvm_va_block_context_t *va_block_context);

#else // UVM_IS_CONFIG_HMM()

    static bool uvm_hmm_is_enabled(uvm_va_space_t *va_space)
    {
        return false;
    }

    static bool uvm_hmm_is_enabled_system_wide(void)
    {
        return false;
    }

    static void uvm_hmm_va_space_initialize(uvm_va_space_t *va_space)
    {
    }

    static void uvm_hmm_va_space_destroy(uvm_va_space_t *va_space)
    {
    }

    static void uvm_hmm_unregister_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu, struct mm_struct *mm)
    {
    }

    static void uvm_hmm_remove_gpu_va_space(uvm_va_space_t *va_space,
                                            uvm_gpu_va_space_t *gpu_va_space,
                                            struct mm_struct *mm)
    {
    }

    static NV_STATUS uvm_hmm_va_block_find(uvm_va_space_t *va_space,
                                           NvU64 addr,
                                           uvm_va_block_t **va_block_ptr)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_va_block_find_create(uvm_va_space_t *va_space,
                                                  NvU64 addr,
                                                  struct vm_area_struct **vma,
                                                  uvm_va_block_t **va_block_ptr)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_find_vma(struct mm_struct *mm, struct vm_area_struct **vma, NvU64 addr)
    {
        return NV_OK;
    }

    static bool uvm_hmm_check_context_vma_is_valid(uvm_va_block_t *va_block,
                                                   struct vm_area_struct *vma,
                                                   uvm_va_block_region_t region)
    {
        return true;
    }

    static void uvm_hmm_service_context_init(uvm_service_block_context_t *service_context)
    {
    }

    static NV_STATUS uvm_hmm_migrate_begin(uvm_va_block_t *va_block)
    {
        return NV_OK;
    }

    static void uvm_hmm_migrate_begin_wait(uvm_va_block_t *va_block)
    {
    }

    static void uvm_hmm_migrate_finish(uvm_va_block_t *va_block)
    {
    }

    static NV_STATUS uvm_hmm_test_va_block_inject_split_error(uvm_va_space_t *va_space, NvU64 addr)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_va_block_reclaim(uvm_va_space_t *va_space,
                                              struct mm_struct *mm,
                                              NvU64 start,
                                              NvU64 end)
    {
        return NV_OK;
    }

    static void uvm_hmm_va_block_split_tree(uvm_va_block_t *existing_va_block, uvm_va_block_t *new_block)
    {
    }

    static NV_STATUS uvm_hmm_split_as_needed(uvm_va_space_t *va_space,
                                             NvU64 addr,
                                             uvm_va_policy_is_split_needed_t split_needed_cb,
                                             void *data)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_set_preferred_location(uvm_va_space_t *va_space,
                                                    uvm_processor_id_t preferred_location,
                                                    NvU64 base,
                                                    NvU64 last_address,
                                                    uvm_tracker_t *out_tracker)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_set_accessed_by(uvm_va_space_t *va_space,
                                             uvm_processor_id_t processor_id,
                                             bool set_bit,
                                             NvU64 base,
                                             NvU64 last_address,
                                             uvm_tracker_t *out_tracker)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static void uvm_hmm_block_add_eviction_mappings(uvm_va_space_t *va_space,
                                                    uvm_va_block_t *va_block,
                                                    uvm_va_block_context_t *block_context)
    {
    }

    static NV_STATUS uvm_hmm_set_read_duplication(uvm_va_space_t *va_space,
                                                  uvm_read_duplication_policy_t new_policy,
                                                  NvU64 base,
                                                  NvU64 last_address)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static const uvm_va_policy_t *uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
                                                          struct vm_area_struct *vma,
                                                          unsigned long addr,
                                                          NvU64 *endp)
    {
        UVM_ASSERT(0);
        return NULL;
    }

    static NV_STATUS uvm_hmm_find_policy_vma_and_outer(uvm_va_block_t *va_block,
                                                       struct vm_area_struct **vma,
                                                       uvm_page_index_t page_index,
                                                       const uvm_va_policy_t **policy,
                                                       uvm_page_index_t *outerp)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_clear_thrashing_policy(uvm_va_space_t *va_space)
    {
        return NV_OK;
    }

    static uvm_va_block_region_t uvm_hmm_get_prefetch_region(uvm_va_block_t *va_block,
                                                             struct vm_area_struct *vma,
                                                             const uvm_va_policy_t *policy,
                                                             NvU64 address)
    {
        return (uvm_va_block_region_t){};
    }

    static uvm_prot_t uvm_hmm_compute_logical_prot(uvm_va_block_t *va_block,
                                                   struct vm_area_struct *vma,
                                                   NvU64 addr)
    {
        return UVM_PROT_NONE;
    }

    static NV_STATUS uvm_hmm_va_block_service_locked(uvm_processor_id_t processor_id,
                                                     uvm_processor_id_t new_residency,
                                                     uvm_va_block_t *va_block,
                                                     uvm_va_block_retry_t *va_block_retry,
                                                     uvm_service_block_context_t *service_context)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_va_block_migrate_locked(uvm_va_block_t *va_block,
                                                     uvm_va_block_retry_t *va_block_retry,
                                                     uvm_va_block_context_t *va_block_context,
                                                     uvm_processor_id_t dest_id,
                                                     uvm_va_block_region_t region,
                                                     uvm_make_resident_cause_t cause)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_migrate_ranges(uvm_va_space_t *va_space,
                                            uvm_va_block_context_t *va_block_context,
                                            NvU64 base,
                                            NvU64 length,
                                            uvm_processor_id_t dest_id,
                                            uvm_migrate_mode_t mode,
                                            uvm_tracker_t *out_tracker)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static void uvm_hmm_evict_va_blocks(uvm_va_space_t *va_space)
    {
    }

    static NV_STATUS uvm_hmm_va_block_evict_chunk_prep(uvm_va_block_t *va_block,
                                                       uvm_va_block_context_t *va_block_context,
                                                       uvm_gpu_chunk_t *gpu_chunk,
                                                       uvm_va_block_region_t chunk_region)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_va_block_evict_chunks(uvm_va_block_t *va_block,
                                                   uvm_va_block_context_t *va_block_context,
                                                   const uvm_page_mask_t *pages_to_evict,
                                                   uvm_va_block_region_t region,
                                                   bool *out_accessed_by_set)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_va_block_evict_pages_from_gpu(uvm_va_block_t *va_block,
                                                           uvm_gpu_t *gpu,
                                                           uvm_va_block_context_t *va_block_context,
                                                           const uvm_page_mask_t *pages_to_evict,
                                                           uvm_va_block_region_t region)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_pmm_gpu_evict_pfn(unsigned long pfn)
    {
        return NV_OK;
    }

    static NV_STATUS uvm_hmm_va_block_range_bounds(uvm_va_space_t *va_space,
                                                   struct mm_struct *mm,
                                                   NvU64 lookup_address,
                                                   NvU64 *startp,
                                                   NvU64 *endp,
                                                   UVM_TEST_VA_RESIDENCY_INFO_PARAMS *params)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_hmm_va_block_update_residency_info(uvm_va_block_t *va_block,
                                                            struct mm_struct *mm,
                                                            NvU64 lookup_address,
                                                            bool populate)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static NV_STATUS uvm_test_split_invalidate_delay(UVM_TEST_SPLIT_INVALIDATE_DELAY_PARAMS *params,
                                                     struct file *filp)
    {
        return NV_ERR_INVALID_STATE;
    }

    static NV_STATUS uvm_hmm_va_range_info(uvm_va_space_t *va_space,
                                           struct mm_struct *mm,
                                           UVM_TEST_VA_RANGE_INFO_PARAMS *params)
    {
        return NV_ERR_INVALID_ADDRESS;
    }

    static bool uvm_hmm_must_use_sysmem(uvm_va_block_t *va_block,
                                        uvm_va_block_context_t *va_block_context)
    {
        return false;
    }

#endif // UVM_IS_CONFIG_HMM()

#endif  // _UVM_HMM_H_