1 /*******************************************************************************
2     Copyright (c) 2015-2022 NVidia Corporation
3 
4     Permission is hereby granted, free of charge, to any person obtaining a copy
5     of this software and associated documentation files (the "Software"), to
6     deal in the Software without restriction, including without limitation the
7     rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8     sell copies of the Software, and to permit persons to whom the Software is
9     furnished to do so, subject to the following conditions:
10 
11         The above copyright notice and this permission notice shall be
12         included in all copies or substantial portions of the Software.
13 
14     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17     THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18     LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20     DEALINGS IN THE SOFTWARE.
21 *******************************************************************************/
22 
23 #ifndef __UVM_TEST_IOCTL_H__
24 #define __UVM_TEST_IOCTL_H__
25 
26 
27 #include "uvm_types.h"
28 #include "uvm_ioctl.h"
29 #include "nv_uvm_types.h"
30 
31 #ifdef __cplusplus
32 extern "C" {
33 #endif
34 
// Offset the test ioctls to leave space for the API ones
#define UVM_TEST_IOCTL_BASE(i)                          UVM_IOCTL_BASE(200 + i)

// Returns the driver's current reference count on the given GPU.
#define UVM_TEST_GET_GPU_REF_COUNT                       UVM_TEST_IOCTL_BASE(0)
typedef struct
{
    // In params
    NvProcessorUuid gpu_uuid;
    // Out params
    NvU64           ref_count NV_ALIGN_BYTES(8);
    NV_STATUS       rmStatus;
} UVM_TEST_GET_GPU_REF_COUNT_PARAMS;

#define UVM_TEST_RNG_SANITY                              UVM_TEST_IOCTL_BASE(1)
typedef struct
{
    NV_STATUS rmStatus; // Out
} UVM_TEST_RNG_SANITY_PARAMS;

#define UVM_TEST_RANGE_TREE_DIRECTED                     UVM_TEST_IOCTL_BASE(2)
typedef struct
{
    NV_STATUS rmStatus; // Out
} UVM_TEST_RANGE_TREE_DIRECTED_PARAMS;
59 
#define UVM_TEST_RANGE_TREE_RANDOM                       UVM_TEST_IOCTL_BASE(3)
typedef struct
{
    NvU32     seed;                                 // In
    NvU64     main_iterations    NV_ALIGN_BYTES(8); // In
    NvU32     verbose;                              // In

    // Probability (0-100)
    //
    // When the test starts up, it adds and splits ranges with high_probability.
    // Eventually when adds and splits fail too often, they'll invert their
    // probability to 100 - high_probability. They'll switch back when the tree
    // becomes too empty.
    //
    // This can be < 50, but the test will not be very interesting.
    NvU32     high_probability;                     // In

    // Probability (0-100)
    //
    // Every main iteration a group of operations is selected with this
    // probability. The group consists of either "add/remove" or "split/merge."
    // This is the chance that the "add/remove" group is selected each
    // iteration.
    NvU32     add_remove_shrink_group_probability;  // In

    // Probability (0-100)
    //
    // Probability of picking the shrink operation instead of add/remove if the
    // add/remove/shrink group of operations is selected.
    NvU32     shrink_probability;                   // In

    // The number of collision verification checks to make each main iteration
    NvU32     collision_checks;                     // In

    // The number of tree iterator verification checks to make each main
    // iteration.
    NvU32     iterator_checks;                      // In

    // Highest range value to use
    NvU64     max_end            NV_ALIGN_BYTES(8); // In

    // Maximum number of range nodes to put in the tree
    NvU64     max_ranges         NV_ALIGN_BYTES(8); // In

    // Maximum number of range nodes to add or remove at one time
    NvU64     max_batch_count    NV_ALIGN_BYTES(8); // In

    // add, split, and merge operations all operate on randomly-selected ranges
    // or nodes. It's possible, sometimes even likely, that the operation cannot
    // be performed on the selected range or node.
    //
    // For example, when a range node is added its range is selected at random
    // without regard to range nodes already in the tree. If a collision occurs
    // when the test attempts to add that node to the tree, a new, smaller
    // random range is selected and the attempt is made again.
    //
    // max_attempts is the maximum number of times to keep picking new ranges or
    // nodes before giving up on the operation.
    NvU32     max_attempts;                          // In

    // Counters tracking how many of each operation were attempted and how many
    // failed over the whole run.
    struct
    {
        NvU64 total_adds         NV_ALIGN_BYTES(8);
        NvU64 failed_adds        NV_ALIGN_BYTES(8);
        NvU64 max_attempts_add   NV_ALIGN_BYTES(8);
        NvU64 total_removes      NV_ALIGN_BYTES(8);
        NvU64 total_splits       NV_ALIGN_BYTES(8);
        NvU64 failed_splits      NV_ALIGN_BYTES(8);
        NvU64 max_attempts_split NV_ALIGN_BYTES(8);
        NvU64 total_merges       NV_ALIGN_BYTES(8);
        NvU64 failed_merges      NV_ALIGN_BYTES(8);
        NvU64 max_attempts_merge NV_ALIGN_BYTES(8);
        NvU64 total_shrinks      NV_ALIGN_BYTES(8);
        NvU64 failed_shrinks     NV_ALIGN_BYTES(8);
    } stats;                                        // Out

    NV_STATUS rmStatus;                             // Out
} UVM_TEST_RANGE_TREE_RANDOM_PARAMS;
138 
// Keep this in sync with uvm_va_range_type_t in uvm_va_range.h
typedef enum
{
    UVM_TEST_VA_RANGE_TYPE_INVALID = 0,
    UVM_TEST_VA_RANGE_TYPE_MANAGED,
    UVM_TEST_VA_RANGE_TYPE_EXTERNAL,
    UVM_TEST_VA_RANGE_TYPE_CHANNEL,
    UVM_TEST_VA_RANGE_TYPE_SKED_REFLECTED,
    UVM_TEST_VA_RANGE_TYPE_SEMAPHORE_POOL,
    UVM_TEST_VA_RANGE_TYPE_MAX
} UVM_TEST_VA_RANGE_TYPE;

// Subtype of a managed range (see the 'subtype' field of
// UVM_TEST_VA_RANGE_INFO_MANAGED below).
typedef enum
{
    UVM_TEST_RANGE_SUBTYPE_INVALID = 0,
    UVM_TEST_RANGE_SUBTYPE_UVM,
    UVM_TEST_RANGE_SUBTYPE_HMM,
    UVM_TEST_RANGE_SUBTYPE_MAX
} UVM_TEST_RANGE_SUBTYPE;

// Keep this in sync with uvm_read_duplication_t in uvm_va_range.h
typedef enum
{
    UVM_TEST_READ_DUPLICATION_UNSET = 0,
    UVM_TEST_READ_DUPLICATION_ENABLED,
    UVM_TEST_READ_DUPLICATION_DISABLED,
    UVM_TEST_READ_DUPLICATION_MAX
} UVM_TEST_READ_DUPLICATION_POLICY;

// Managed-range-specific information, returned inside
// UVM_TEST_VA_RANGE_INFO_PARAMS when type == UVM_TEST_VA_RANGE_TYPE_MANAGED.
typedef struct
{
    // Note: if this is a zombie or not owned by the calling process, the vma info
    // will not be filled out and is invalid.
    NvU64  vma_start NV_ALIGN_BYTES(8); // Out
    NvU64  vma_end   NV_ALIGN_BYTES(8); // Out, inclusive
    NvBool is_zombie;                   // Out
    // Note: if this is a zombie, this field is meaningless.
    NvBool owned_by_calling_process;    // Out
    NvU32  subtype;                     // Out (UVM_TEST_RANGE_SUBTYPE)
} UVM_TEST_VA_RANGE_INFO_MANAGED;
179 
#define UVM_TEST_VA_RANGE_INFO                           UVM_TEST_IOCTL_BASE(4)
typedef struct
{
    NvU64                           lookup_address                   NV_ALIGN_BYTES(8); // In

    // For HMM ranges va_range_start/end will contain the lookup address but not
    // necessarily the maximal range over which the returned policy applies.
    // For example there could be adjacent ranges with the same policy, implying
    // the returned range could be as small as a page in the worst case for HMM.
    NvU64                           va_range_start                   NV_ALIGN_BYTES(8); // Out
    NvU64                           va_range_end                     NV_ALIGN_BYTES(8); // Out, inclusive
    NvU32                           read_duplication;                                   // Out (UVM_TEST_READ_DUPLICATION_POLICY)
    NvProcessorUuid                 preferred_location;                                 // Out
    NvS32                           preferred_cpu_nid;                                  // Out
    NvProcessorUuid                 accessed_by[UVM_MAX_PROCESSORS_V2];                 // Out
    NvU32                           accessed_by_count;                                  // Out
    NvU32                           type;                                               // Out (UVM_TEST_VA_RANGE_TYPE)
    union
    {
        UVM_TEST_VA_RANGE_INFO_MANAGED managed                       NV_ALIGN_BYTES(8); // Out
        // More here eventually
    };

    // NV_ERR_INVALID_ADDRESS   lookup_address doesn't match a UVM range
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_VA_RANGE_INFO_PARAMS;
206 
#define UVM_TEST_RM_MEM_SANITY                           UVM_TEST_IOCTL_BASE(5)
typedef struct
{
    // Out params
    NV_STATUS rmStatus;
} UVM_TEST_RM_MEM_SANITY_PARAMS;

#define UVM_TEST_GPU_SEMAPHORE_SANITY                    UVM_TEST_IOCTL_BASE(6)
typedef struct
{
    // Out params
    NV_STATUS rmStatus;
} UVM_TEST_GPU_SEMAPHORE_SANITY_PARAMS;

// Returns the driver's reference count on the peer state between the two given
// GPUs.
#define UVM_TEST_PEER_REF_COUNT                          UVM_TEST_IOCTL_BASE(7)
typedef struct
{
    // In params
    NvProcessorUuid gpu_uuid_1;
    NvProcessorUuid gpu_uuid_2;

    // Out params
    NV_STATUS       rmStatus;
    NvU64           ref_count   NV_ALIGN_BYTES(8);
} UVM_TEST_PEER_REF_COUNT_PARAMS;
232 
// Force an existing UVM range to split. split_address will be the new end of
// the existing range. A new range will be created covering
// [split_address+1, original end].
//
// Error returns:
// NV_ERR_INVALID_ADDRESS
//  - split_address+1 isn't page-aligned
//  - split_address doesn't match a splittable UVM range
//  - The range cannot be split at split_address because split_address is
//    already the end of the range.
#define UVM_TEST_VA_RANGE_SPLIT                          UVM_TEST_IOCTL_BASE(8)
typedef struct
{
    NvU64     split_address NV_ALIGN_BYTES(8); // In
    NV_STATUS rmStatus;                        // Out
} UVM_TEST_VA_RANGE_SPLIT_PARAMS;

// Forces the next range split on the range covering lookup_address to fail with
// an out-of-memory error. Only the next split will fail. Subsequent ones will
// succeed. The split can come from any source, such as vma splitting or
// UVM_TEST_VA_RANGE_SPLIT.
//
// Error returns:
// NV_ERR_INVALID_ADDRESS
//  - lookup_address doesn't match a UVM range
#define UVM_TEST_VA_RANGE_INJECT_SPLIT_ERROR             UVM_TEST_IOCTL_BASE(9)
typedef struct
{
    NvU64     lookup_address NV_ALIGN_BYTES(8); // In
    NV_STATUS rmStatus;                         // Out
} UVM_TEST_VA_RANGE_INJECT_SPLIT_ERROR_PARAMS;

// Runs the page tree test. No input parameters.
#define UVM_TEST_PAGE_TREE                               UVM_TEST_IOCTL_BASE(10)
typedef struct
{
    NV_STATUS rmStatus;                     // Out
} UVM_TEST_PAGE_TREE_PARAMS;
270 
// Given a VA and a target processor, forcibly set that processor's mapping to
// the VA to the given permissions. This may require changing other processors'
// mappings. For example, setting an atomic mapping for a given GPU might make
// other GPUs' mappings read-only.
//
// If the mapping changes from invalid to anything else, this call always
// attempts to create direct mappings from the given processor to the current
// physical memory backing the target address. If a direct mapping cannot be
// created, or no physical memory currently backs the VA,
// NV_ERR_INVALID_OPERATION is returned.
//
// uuid is allowed to be NV_PROCESSOR_UUID_CPU_DEFAULT.
//
// Error returns:
// NV_ERR_INVALID_DEVICE
//  - uuid is an unknown value
//  - uuid is a GPU that hasn't been registered with this process
//
// NV_ERR_INVALID_ADDRESS
// - VA is unknown to the kernel
// - VA isn't aligned to the system page size
//
// NV_ERR_INVALID_STATE
// - A mapping for va can't be accessed because it belongs to another process
//
// NV_ERR_INVALID_ARGUMENT
// - mapping is not a valid enum value
//
// NV_ERR_INVALID_ACCESS_TYPE
// - The mapping permissions aren't logically allowed. For example,
//   UVM_TEST_PTE_MAPPING_READ_WRITE can't be set on a read-only mapping.
//
// NV_ERR_INVALID_OPERATION
// - mapping is not UVM_TEST_PTE_MAPPING_INVALID, and a direct mapping from the
//   given processor to the physical memory currently backing VA cannot be
//   created.
#define UVM_TEST_CHANGE_PTE_MAPPING                      UVM_TEST_IOCTL_BASE(11)

// Permission level to apply to the target processor's mapping (see the
// 'mapping' field below).
typedef enum
{
    UVM_TEST_PTE_MAPPING_INVALID = 0,
    UVM_TEST_PTE_MAPPING_READ_ONLY,
    UVM_TEST_PTE_MAPPING_READ_WRITE,
    UVM_TEST_PTE_MAPPING_READ_WRITE_ATOMIC,
    UVM_TEST_PTE_MAPPING_MAX
} UVM_TEST_PTE_MAPPING;

typedef struct
{
    NvProcessorUuid      uuid      NV_ALIGN_BYTES(8); // In
    NvU64                va        NV_ALIGN_BYTES(8); // In
    NvU32                mapping;                     // In (UVM_TEST_PTE_MAPPING)
    NV_STATUS            rmStatus;                    // Out
} UVM_TEST_CHANGE_PTE_MAPPING_PARAMS;
325 
#define UVM_TEST_TRACKER_SANITY                          UVM_TEST_IOCTL_BASE(12)
typedef struct
{
    NV_STATUS rmStatus;               // Out
} UVM_TEST_TRACKER_SANITY_PARAMS;

#define UVM_TEST_PUSH_SANITY                             UVM_TEST_IOCTL_BASE(13)
typedef struct
{
    NvBool    skipTimestampTest;      // In
    NV_STATUS rmStatus;               // Out
} UVM_TEST_PUSH_SANITY_PARAMS;

#define UVM_TEST_CHANNEL_SANITY                          UVM_TEST_IOCTL_BASE(14)
typedef struct
{
    NV_STATUS rmStatus;               // Out
} UVM_TEST_CHANNEL_SANITY_PARAMS;

// Operating modes for UVM_TEST_CHANNEL_STRESS (see the 'mode' field of
// UVM_TEST_CHANNEL_STRESS_PARAMS).
typedef enum
{
    UVM_TEST_CHANNEL_STRESS_MODE_NOOP_PUSH = 0,
    UVM_TEST_CHANNEL_STRESS_MODE_UPDATE_CHANNELS,
    UVM_TEST_CHANNEL_STRESS_MODE_STREAM,
    UVM_TEST_CHANNEL_STRESS_MODE_KEY_ROTATION,
} UVM_TEST_CHANNEL_STRESS_MODE;

// Key rotation sub-operation, used when mode == KEY_ROTATION (see the
// 'key_rotation_operation' field of UVM_TEST_CHANNEL_STRESS_PARAMS).
typedef enum
{
    UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_CPU_TO_GPU,
    UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_GPU_TO_CPU,
    UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_ROTATE,
} UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION;
359 
#define UVM_TEST_CHANNEL_STRESS                          UVM_TEST_IOCTL_BASE(15)
typedef struct
{
    NvU32     mode;                   // In, one of UVM_TEST_CHANNEL_STRESS_MODE

    // Number of iterations:
    //   mode == NOOP_PUSH: number of noop pushes
    //   mode == UPDATE_CHANNELS: number of updates
    //   mode == STREAM: number of iterations per stream
    //   mode == KEY_ROTATION: number of operations
    NvU32     iterations;             // In

    NvU32     num_streams;            // In, used only if mode == STREAM
    NvU32     key_rotation_operation; // In, used only if mode == KEY_ROTATION
    NvU32     seed;                   // In
    NvU32     verbose;                // In
    NV_STATUS rmStatus;               // Out
} UVM_TEST_CHANNEL_STRESS_PARAMS;

#define UVM_TEST_CE_SANITY                               UVM_TEST_IOCTL_BASE(16)
typedef struct
{
    NvBool    skipTimestampTest;      // In
    NV_STATUS rmStatus;               // Out
} UVM_TEST_CE_SANITY_PARAMS;
385 
#define UVM_TEST_VA_BLOCK_INFO                           UVM_TEST_IOCTL_BASE(17)

// See UVM_VA_BLOCK_SIZE in uvm_va_block.h for an explanation of this number
#define UVM_TEST_VA_BLOCK_SIZE (2ull*1024*1024)

// Returns the [start, end] extent of the VA block containing lookup_address.
typedef struct
{
    NvU64     lookup_address    NV_ALIGN_BYTES(8); // In


    NvU64     va_block_start    NV_ALIGN_BYTES(8); // Out
    NvU64     va_block_end      NV_ALIGN_BYTES(8); // Out, inclusive

    // NV_ERR_INVALID_ADDRESS   lookup_address doesn't match a UVM range
    //
    // NV_ERR_OBJECT_NOT_FOUND  lookup_address matched a UVM range on this file
    //                          but the corresponding block has not yet been
    //                          populated
    NV_STATUS rmStatus;                            // Out
} UVM_TEST_VA_BLOCK_INFO_PARAMS;

#define UVM_TEST_LOCK_SANITY                             UVM_TEST_IOCTL_BASE(18)
typedef struct
{
    NV_STATUS rmStatus; // Out
} UVM_TEST_LOCK_SANITY_PARAMS;

#define UVM_TEST_PERF_UTILS_SANITY                       UVM_TEST_IOCTL_BASE(19)
typedef struct
{
    NV_STATUS rmStatus; // Out
} UVM_TEST_PERF_UTILS_SANITY_PARAMS;

#define UVM_TEST_KVMALLOC                                UVM_TEST_IOCTL_BASE(20)
typedef struct
{
    NV_STATUS rmStatus; // Out
} UVM_TEST_KVMALLOC_PARAMS;
424 
#define UVM_TEST_PMM_QUERY                               UVM_TEST_IOCTL_BASE(21)
typedef enum
{
    // Get the value of valid user allocations as key
    UVM_TEST_CHUNK_SIZE_GET_USER_SIZE
} uvm_test_pmm_query_key_t;

typedef struct
{
    // In params
    NvProcessorUuid gpu_uuid;
    NvU64 key;          // (uvm_test_pmm_query_key_t)
    // Out params
    NvU64 value;
    NV_STATUS rmStatus;
} UVM_TEST_PMM_QUERY_PARAMS;

#define UVM_TEST_PMM_CHECK_LEAK                          UVM_TEST_IOCTL_BASE(22)

typedef struct
{
    NvProcessorUuid gpu_uuid; // In
    NvU64 chunk_size;         // In
    NvS64 alloc_limit;        // In. Number of chunks to allocate. -1 means unlimited
    NvU64 allocated;          // Out. Number of chunks actually allocated
    NV_STATUS rmStatus;       // Out
} UVM_TEST_PMM_CHECK_LEAK_PARAMS;

#define UVM_TEST_PERF_EVENTS_SANITY                      UVM_TEST_IOCTL_BASE(23)
typedef struct
{
    // Out params
    NV_STATUS rmStatus;
} UVM_TEST_PERF_EVENTS_SANITY_PARAMS;
459 
#define UVM_TEST_PERF_MODULE_SANITY                      UVM_TEST_IOCTL_BASE(24)
typedef struct
{
    // In params
    NvU64 range_address              NV_ALIGN_BYTES(8);
    NvU32 range_size;
    // Out params
    NV_STATUS rmStatus;
} UVM_TEST_PERF_MODULE_SANITY_PARAMS;

#define UVM_TEST_RANGE_ALLOCATOR_SANITY                  UVM_TEST_IOCTL_BASE(25)
typedef struct
{
    // In params
    NvU32 verbose;
    NvU32 seed;
    NvU32 iters;

    // Out params
    NV_STATUS rmStatus;
} UVM_TEST_RANGE_ALLOCATOR_SANITY_PARAMS;

#define UVM_TEST_GET_RM_PTES                             UVM_TEST_IOCTL_BASE(26)

// Test modes for UVM_TEST_GET_RM_PTES (see the 'test_mode' field below).
typedef enum
{
    UVM_TEST_GET_RM_PTES_SINGLE_GPU = 0,
    UVM_TEST_GET_RM_PTES_MULTI_GPU_SUPPORTED,
    UVM_TEST_GET_RM_PTES_MULTI_GPU_SLI_SUPPORTED,
    UVM_TEST_GET_RM_PTES_MULTI_GPU_NOT_SUPPORTED,
    UVM_TEST_GET_RM_PTES_MAX
} UVM_TEST_PTE_RM_PTES_TEST_MODE;

typedef struct
{
    // In
    NvS32 rmCtrlFd;             // For future use. (security check)
    NvHandle hClient;
    NvHandle hMemory;
    NvU32 test_mode;            // (UVM_TEST_PTE_RM_PTES_TEST_MODE)
    NvU64 size                  NV_ALIGN_BYTES(8);
    NvProcessorUuid gpu_uuid;

    // Out
    NV_STATUS rmStatus;
} UVM_TEST_GET_RM_PTES_PARAMS;
505 
#define UVM_TEST_FAULT_BUFFER_FLUSH                      UVM_TEST_IOCTL_BASE(27)
typedef struct
{
    NvU64 iterations;           // In
    NV_STATUS rmStatus;         // Out
} UVM_TEST_FAULT_BUFFER_FLUSH_PARAMS;

#define UVM_TEST_INJECT_TOOLS_EVENT                      UVM_TEST_IOCTL_BASE(28)
typedef struct
{
    // In params
    union
    {
        UvmEventEntry_V1 entry_v1; // contains only NvUxx types
        UvmEventEntry_V2 entry_v2; // contains only NvUxx types
    };
    // NOTE(review): version presumably selects between entry_v1 and entry_v2,
    // and count the number of injections — confirm against the ioctl handler.
    NvU32 version;
    NvU32 count;

    // Out param
    NV_STATUS rmStatus;
} UVM_TEST_INJECT_TOOLS_EVENT_PARAMS;

#define UVM_TEST_INCREMENT_TOOLS_COUNTER                 UVM_TEST_IOCTL_BASE(29)
typedef struct
{
    // In params
    NvU64 amount                     NV_ALIGN_BYTES(8); // amount to increment
    NvU32 counter;                                      // name of counter
    NvProcessorUuid processor;
    NvU32 count;                                        // number of times to increment

    // Out param
    NV_STATUS rmStatus;
} UVM_TEST_INCREMENT_TOOLS_COUNTER_PARAMS;

#define UVM_TEST_MEM_SANITY                              UVM_TEST_IOCTL_BASE(30)
typedef struct
{
    // Out params
    NV_STATUS rmStatus;
} UVM_TEST_MEM_SANITY_PARAMS;

// Note: no test ioctl numbered 31 is defined in this header.
#define UVM_TEST_MAKE_CHANNEL_STOPS_IMMEDIATE            UVM_TEST_IOCTL_BASE(32)
typedef struct
{
    // Out params
    NV_STATUS rmStatus;
} UVM_TEST_MAKE_CHANNEL_STOPS_IMMEDIATE_PARAMS;
555 
// Inject an error into the VA block covering the lookup_address
//
// If page_table_allocation_retry_force_count is non-0 then the next count
// page table allocations under the VA block will be forced to do
// allocation-retry.
//
// If user_pages_allocation_retry_force_count is non-0 then the next count user
// memory allocations under the VA block will be forced to do allocation-retry.
//
// If cpu_pages_allocation_error_count is not zero, the subsequent operations
// that need to allocate CPU pages will fail with NV_ERR_NO_MEMORY for
// cpu_pages_allocation_error_count times. If cpu_pages_allocation_error_count
// is equal to ~0U, the count is infinite.
//
// If eviction_error is NV_TRUE, the next eviction attempt from the VA block
// will fail with NV_ERR_NO_MEMORY.
//
// If populate_error is NV_TRUE, a retry error will be injected after the next
// successful user memory allocation under the VA block but before that
// allocation is used by the block. This is similar to
// user_pages_allocation_retry_force_count, but the injection point simulates
// driver metadata allocation failure.
//
// cpu_chunk_allocation_target_id and cpu_chunk_allocation_actual_id are used
// to control the NUMA node IDs for CPU chunk allocations, specifically for
// testing overlapping CPU chunk allocations.
//
// Currently, uvm_api_migrate() does not pass the preferred CPU NUMA node for
// managed memory so it is not possible to request a specific node.
// cpu_chunk_allocation_target_id is used to request the allocation be made on
// specific node. On the other hand, cpu_chunk_allocation_actual_id is the node
// on which the allocation will actually be made.
//
// The two parameters can be used to force a CPU chunk allocation to overlap a
// previously allocated chunk.
//
// Please note that even when specifying cpu_chunk_allocation_actual_id, the
// kernel may end up allocating on a different node.
//
// Error returns:
// NV_ERR_INVALID_ADDRESS
//  - lookup_address doesn't match a UVM range
#define UVM_TEST_VA_BLOCK_INJECT_ERROR                   UVM_TEST_IOCTL_BASE(33)
typedef struct
{
    NvU64     lookup_address NV_ALIGN_BYTES(8);         // In
    NvU32     page_table_allocation_retry_force_count;  // In
    NvU32     user_pages_allocation_retry_force_count;  // In
    NvU32     cpu_chunk_allocation_size_mask;           // In
    NvS32     cpu_chunk_allocation_target_id;           // In
    NvS32     cpu_chunk_allocation_actual_id;           // In
    NvU32     cpu_pages_allocation_error_count;         // In
    NvBool    eviction_error;                           // In
    NvBool    populate_error;                           // In
    NV_STATUS rmStatus;                                 // Out
} UVM_TEST_VA_BLOCK_INJECT_ERROR_PARAMS;
612 
// Exercises peer identity mappings between the two given GPUs.
#define UVM_TEST_PEER_IDENTITY_MAPPINGS                  UVM_TEST_IOCTL_BASE(34)
typedef struct
{
    // In params
    NvProcessorUuid gpuA;
    NvProcessorUuid gpuB;
    // Out param
    NV_STATUS rmStatus;
} UVM_TEST_PEER_IDENTITY_MAPPINGS_PARAMS;
622 
// Queries residency, mapping, and population state for the page containing
// lookup_address.
#define UVM_TEST_VA_RESIDENCY_INFO                       UVM_TEST_IOCTL_BASE(35)
typedef struct
{
    NvU64                           lookup_address                   NV_ALIGN_BYTES(8); // In

    // Whether to wait on the block tracker before returning. Fields like
    // resident_on and mapped_on represent state which will be valid when the
    // block tracker is complete. If is_async is true, then those fields will
    // still be filled out as if the tracker is done, but the actual residency
    // or mapping changes may not have been performed yet.
    NvBool                          is_async;                                           // In

    // Array of processors which have a resident copy of the page containing
    // lookup_address.
    NvProcessorUuid                 resident_on[UVM_MAX_PROCESSORS_V2];                 // Out
    NvU32                           resident_on_count;                                  // Out

    // If the memory is resident on the CPU, the NUMA node on which the page
    // is resident. Otherwise, -1.
    NvS32                           resident_nid;                                       // Out

    // The size of the physical allocation backing lookup_address. Only the
    // system-page-sized portion of this allocation which contains
    // lookup_address is guaranteed to be resident on the corresponding
    // processor.
    NvU32                           resident_physical_size[UVM_MAX_PROCESSORS_V2];      // Out

    // The physical address of the physical allocation backing lookup_address.
    NvU64                           resident_physical_address[UVM_MAX_PROCESSORS_V2] NV_ALIGN_BYTES(8); // Out

    // Array of processors which have a virtual mapping covering lookup_address.
    NvProcessorUuid                 mapped_on[UVM_MAX_PROCESSORS_V2];                   // Out
    NvU32                           mapping_type[UVM_MAX_PROCESSORS_V2];                // Out
    NvU64                           mapping_physical_address[UVM_MAX_PROCESSORS_V2] NV_ALIGN_BYTES(8); // Out
    NvU32                           mapped_on_count;                                    // Out

    // The size of the virtual mapping covering lookup_address on each
    // mapped_on processor.
    NvU32                           page_size[UVM_MAX_PROCESSORS_V2];                   // Out

    // Array of processors which have physical memory populated that would back
    // lookup_address if it was resident.
    NvProcessorUuid                 populated_on[UVM_MAX_PROCESSORS_V2];                // Out
    NvU32                           populated_on_count;                                 // Out

    NV_STATUS rmStatus;                                                                 // Out
} UVM_TEST_VA_RESIDENCY_INFO_PARAMS;
670 
#define UVM_TEST_PMM_ASYNC_ALLOC                         UVM_TEST_IOCTL_BASE(36)
typedef struct
{
    NvProcessorUuid gpu_uuid;                           // In
    NvU32 num_chunks;                                   // In
    NvU32 num_work_iterations;                          // In
    NV_STATUS rmStatus;                                 // Out
} UVM_TEST_PMM_ASYNC_ALLOC_PARAMS;

typedef enum
{
    UVM_TEST_PREFETCH_FILTERING_MODE_FILTER_ALL,  // Disable all prefetch faults
    UVM_TEST_PREFETCH_FILTERING_MODE_FILTER_NONE, // Enable all prefetch faults
} UvmTestPrefetchFilteringMode;

#define UVM_TEST_SET_PREFETCH_FILTERING                  UVM_TEST_IOCTL_BASE(37)
typedef struct
{
    NvProcessorUuid gpu_uuid;                           // In
    NvU32           filtering_mode;                     // In (UvmTestPrefetchFilteringMode)
    NV_STATUS       rmStatus;                           // Out
} UVM_TEST_SET_PREFETCH_FILTERING_PARAMS;

// Note: no test ioctls numbered 38 or 39 are defined in this header.
typedef enum
{
    UvmTestPmmSanityModeFull  = 1,
    UvmTestPmmSanityModeBasic = 2,
} UvmTestPmmSanityMode;

#define UVM_TEST_PMM_SANITY                              UVM_TEST_IOCTL_BASE(40)
typedef struct
{
    // Test mode of type UvmTestPmmSanityMode
    NvU32         mode; // In
    NV_STATUS rmStatus; // Out
} UVM_TEST_PMM_SANITY_PARAMS;
707 
// Membar type to issue with the TLB invalidate (see the 'membar' field of
// UVM_TEST_INVALIDATE_TLB_PARAMS).
typedef enum
{
    UvmInvalidateTlbMemBarNone  = 1,
    UvmInvalidateTlbMemBarSys   = 2,
    UvmInvalidateTlbMemBarLocal = 3,
} UvmInvalidateTlbMembarType;

// Page table level(s) targeted by the invalidate (see the 'page_table_level'
// field of UVM_TEST_INVALIDATE_TLB_PARAMS).
typedef enum
{
    UvmInvalidatePageTableLevelAll = 1,
    UvmInvalidatePageTableLevelPte = 2,
    UvmInvalidatePageTableLevelPde0 = 3,
    UvmInvalidatePageTableLevelPde1 = 4,
    UvmInvalidatePageTableLevelPde2 = 5,
    UvmInvalidatePageTableLevelPde3 = 6,
    UvmInvalidatePageTableLevelPde4 = 7,
} UvmInvalidatePageTableLevel;

// Whether the invalidate targets all VAs or only the given 'va' (see the
// 'target_va_mode' field of UVM_TEST_INVALIDATE_TLB_PARAMS).
typedef enum
{
    UvmTargetVaModeAll      = 1,
    UvmTargetVaModeTargeted = 2,
} UvmTargetVaMode;

#define UVM_TEST_INVALIDATE_TLB                          UVM_TEST_IOCTL_BASE(41)
typedef struct
{
    // In params
    NvProcessorUuid  gpu_uuid;
    NvU64            va NV_ALIGN_BYTES(8);
    NvU32            target_va_mode;           // UvmTargetVaMode
    NvU32            page_table_level;         // UvmInvalidatePageTableLevel
    NvU32            membar;                   // UvmInvalidateTlbMembarType
    NvBool           disable_gpc_invalidate;

    // Out params
    NV_STATUS        rmStatus;
} UVM_TEST_INVALIDATE_TLB_PARAMS;

#define UVM_TEST_VA_BLOCK                                UVM_TEST_IOCTL_BASE(42)
typedef struct
{
    NV_STATUS rmStatus; // Out
} UVM_TEST_VA_BLOCK_PARAMS;
752 
// Selection policy for UVM_TEST_EVICT_CHUNK (see the 'eviction_mode' field of
// UVM_TEST_EVICT_CHUNK_PARAMS).
typedef enum
{
    // Default policy based eviction
    //
    // Evicts a chunk that the default eviction path would pick.
    UvmTestEvictModeDefault = 1,

    // Virtual address based eviction
    //
    // Evicts the root chunk that the chunk backing the provided virtual address
    // belongs to.
    UvmTestEvictModeVirtual,

    // Physical address based eviction
    //
    // Evicts the root chunk covering the provided physical address.
    UvmTestEvictModePhysical,
} UvmTestEvictMode;

// Evict a chunk chosen according to one the test eviction modes specified
// above. Eviction may not always be possible, but as long as the arguments are
// valid NV_OK will be returned. To check whether eviction happened, the
// chunk_was_evicted flag needs to be inspected.
#define UVM_TEST_EVICT_CHUNK                             UVM_TEST_IOCTL_BASE(43)
typedef struct
{
    // The GPU to evict from, has to be registered in the VA space.
    NvProcessorUuid                 gpu_uuid;                                           // In

    // UvmTestEvictMode
    NvU32                           eviction_mode;                                      // In

    // Virtual or physical address if eviction_mode is UvmTestEvictModeVirtual
    // or UvmTestEvictModePhysical.
    NvU64                           address                          NV_ALIGN_BYTES(8); // In

    // Flag indicating whether the eviction was performed.
    NvBool                          chunk_was_evicted;                                  // Out

    // Physical address of the evicted root chunk. Notably 0 is a valid physical address.
    NvU64                           evicted_physical_address         NV_ALIGN_BYTES(8); // Out

    // For the virtual eviction mode, returns the size of the chunk that was
    // backing the virtual address before being evicted. 0 otherwise.
    NvU64                           chunk_size_backing_virtual       NV_ALIGN_BYTES(8); // Out

    NV_STATUS rmStatus;                                                                 // Out
} UVM_TEST_EVICT_CHUNK_PARAMS;
801 
// Kinds of deferred work UVM_TEST_FLUSH_DEFERRED_WORK can flush.
typedef enum
{
    // Flush deferred accessed by mappings
    // NOTE(review): "Acessed" is a typo for "Accessed", but the identifier is
    // part of the test API and cannot be renamed without breaking callers.
    UvmTestDeferredWorkTypeAcessedByMappings = 1,
} UvmTestDeferredWorkType;

// Test ioctl: flush the requested kind of deferred work.
#define UVM_TEST_FLUSH_DEFERRED_WORK                     UVM_TEST_IOCTL_BASE(44)
typedef struct
{
    // UvmTestDeferredWorkType
    NvU32                           work_type;                                          // In

    NV_STATUS rmStatus;                                                                 // Out
} UVM_TEST_FLUSH_DEFERRED_WORK_PARAMS;

// Test ioctl with no inputs; only the resulting status is reported.
#define UVM_TEST_NV_KTHREAD_Q                            UVM_TEST_IOCTL_BASE(45)
typedef struct
{
    NV_STATUS rmStatus; // Out
} UVM_TEST_NV_KTHREAD_Q_PARAMS;
822 
// Enable/disable switch for the page prefetch policy; _MAX bounds valid values.
typedef enum
{
    UVM_TEST_PAGE_PREFETCH_POLICY_ENABLE = 0,
    UVM_TEST_PAGE_PREFETCH_POLICY_DISABLE,
    UVM_TEST_PAGE_PREFETCH_POLICY_MAX
} UVM_TEST_PAGE_PREFETCH_POLICY;

// Test ioctl: set the page prefetch policy.
#define UVM_TEST_SET_PAGE_PREFETCH_POLICY                UVM_TEST_IOCTL_BASE(46)
typedef struct
{
    NvU32       policy; // In (UVM_TEST_PAGE_PREFETCH_POLICY)
    NV_STATUS rmStatus; // Out
} UVM_TEST_SET_PAGE_PREFETCH_POLICY_PARAMS;

// Test ioctl: exercises the range group tree with the four given group IDs.
#define UVM_TEST_RANGE_GROUP_TREE                        UVM_TEST_IOCTL_BASE(47)
typedef struct
{
    NvU64 rangeGroupIds[4]                                           NV_ALIGN_BYTES(8); // In
    NV_STATUS rmStatus;                                                                 // Out
} UVM_TEST_RANGE_GROUP_TREE_PARAMS;
843 
// Test ioctl: look up the range group range covering lookup_address and return
// its bounds, owning group ID, and whether a range was present.
#define UVM_TEST_RANGE_GROUP_RANGE_INFO                  UVM_TEST_IOCTL_BASE(48)
typedef struct
{
    NvU64                           lookup_address                   NV_ALIGN_BYTES(8); // In

    NvU64                           range_group_range_start          NV_ALIGN_BYTES(8); // Out
    NvU64                           range_group_range_end            NV_ALIGN_BYTES(8); // Out, inclusive
    NvU64                           range_group_id                   NV_ALIGN_BYTES(8); // Out
    NvU32                           range_group_present;                                // Out
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_RANGE_GROUP_RANGE_INFO_PARAMS;

// Test ioctl: return the number of ranges belonging to the given range group.
#define UVM_TEST_RANGE_GROUP_RANGE_COUNT                 UVM_TEST_IOCTL_BASE(49)
typedef struct
{
    NvU64                           rangeGroupId                     NV_ALIGN_BYTES(8); // In
    NvU64                           count                            NV_ALIGN_BYTES(8); // Out
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_RANGE_GROUP_RANGE_COUNT_PARAMS;
863 
// Test ioctl: query the lapse after which prefetch faults are re-enabled.
#define UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE      UVM_TEST_IOCTL_BASE(50)
typedef struct
{
    NvU32       reenable_lapse; // Out: Lapse in milliseconds
    NV_STATUS         rmStatus; // Out
} UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE_PARAMS;

// Test ioctl: set the lapse after which prefetch faults are re-enabled.
#define UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE      UVM_TEST_IOCTL_BASE(51)
typedef struct
{
    NvU32       reenable_lapse; // In: Lapse in milliseconds
    NV_STATUS         rmStatus; // Out
} UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE_PARAMS;

// Test ioctl: return a kernel virtual address (consumed by other test ioctls
// such as UVM_TEST_VA_SPACE_MM_RETAIN).
#define UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS              UVM_TEST_IOCTL_BASE(52)
typedef struct
{
    NvU64                           addr                            NV_ALIGN_BYTES(8); // Out
    NV_STATUS                       rmStatus;                                          // Out
} UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS_PARAMS;
884 
// Allocate and free memory directly from PMA with eviction enabled. This allows
// to simulate RM-like allocations, but without the RM API lock serializing
// everything.
#define UVM_TEST_PMA_ALLOC_FREE                          UVM_TEST_IOCTL_BASE(53)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In
    NvU32                           page_size;                                          // In
    NvBool                          contiguous;                                         // In
    NvU64                           num_pages                        NV_ALIGN_BYTES(8); // In
    NvU64                           phys_begin                       NV_ALIGN_BYTES(8); // In
    NvU64                           phys_end                         NV_ALIGN_BYTES(8); // In
    NvU32                           nap_us_before_free;                                 // In
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_PMA_ALLOC_FREE_PARAMS;

// Allocate and free user memory directly from PMM with eviction enabled.
//
// Provides a direct way of exercising PMM allocs, eviction and frees of user
// memory type.
#define UVM_TEST_PMM_ALLOC_FREE_ROOT                     UVM_TEST_IOCTL_BASE(54)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In
    NvU32                           nap_us_before_free;                                 // In
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_PMM_ALLOC_FREE_ROOT_PARAMS;

// Inject a PMA eviction error after the specified number of chunks are
// evicted.
#define UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR              UVM_TEST_IOCTL_BASE(55)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In
    NvU32                           error_after_num_chunks;                             // In
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR_PARAMS;
922 
// Change configuration of access counters. This call will disable access
// counters and reenable them using the new configuration. All previous
// notifications will be lost
//
// The reconfiguration affects all VA spaces that rely on the access
// counters information for the same GPU. To avoid conflicting configurations,
// only one VA space is allowed to reconfigure the GPU at a time.
//
// Error returns:
// NV_ERR_INVALID_STATE
//  - The GPU has already been reconfigured in a different VA space
#define UVM_TEST_RECONFIGURE_ACCESS_COUNTERS             UVM_TEST_IOCTL_BASE(56)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In

    // Type UVM_ACCESS_COUNTER_GRANULARITY from nv_uvm_types.h
    NvU32                           mimc_granularity;                                   // In
    NvU32                           momc_granularity;                                   // In

    // Type UVM_ACCESS_COUNTER_USE_LIMIT from nv_uvm_types.h
    NvU32                           mimc_use_limit;                                     // In
    NvU32                           momc_use_limit;                                     // In

    NvU32                           threshold;                                          // In
    NvBool                          enable_mimc_migrations;                             // In
    NvBool                          enable_momc_migrations;                             // In

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_RECONFIGURE_ACCESS_COUNTERS_PARAMS;

// Reset scope for UVM_TEST_RESET_ACCESS_COUNTERS: all counters or a targeted
// one; _MAX bounds valid values.
typedef enum
{
    UVM_TEST_ACCESS_COUNTER_RESET_MODE_ALL = 0,
    UVM_TEST_ACCESS_COUNTER_RESET_MODE_TARGETED,
    UVM_TEST_ACCESS_COUNTER_RESET_MODE_MAX
} UVM_TEST_ACCESS_COUNTER_RESET_MODE;

// Counter class for UVM_TEST_RESET_ACCESS_COUNTERS; _MAX bounds valid values.
typedef enum
{
    UVM_TEST_ACCESS_COUNTER_TYPE_MIMC = 0,
    UVM_TEST_ACCESS_COUNTER_TYPE_MOMC,
    UVM_TEST_ACCESS_COUNTER_TYPE_MAX
} UVM_TEST_ACCESS_COUNTER_TYPE;
967 
// Clear the contents of the access counters. This call supports different
// modes for targeted/global resets.
#define UVM_TEST_RESET_ACCESS_COUNTERS                   UVM_TEST_IOCTL_BASE(57)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In

    // Type UVM_TEST_ACCESS_COUNTER_RESET_MODE
    NvU32                           mode;                                               // In

    // Type UVM_TEST_ACCESS_COUNTER_TYPE
    NvU32                           counter_type;                                       // In

    // bank/tag select the counter for targeted resets.
    NvU32                           bank;                                               // In
    NvU32                           tag;                                                // In
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_RESET_ACCESS_COUNTERS_PARAMS;

// Do not handle access counter notifications when they arrive. This call is
// used to force an overflow of the access counter notification buffer
#define UVM_TEST_SET_IGNORE_ACCESS_COUNTERS              UVM_TEST_IOCTL_BASE(58)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In
    NvBool                          ignore;                                             // In
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_SET_IGNORE_ACCESS_COUNTERS_PARAMS;
995 
// Verifies that the given channel is registered under the UVM VA space of
// vaSpaceFd. Returns NV_OK if so, NV_ERR_INVALID_CHANNEL if not.
#define UVM_TEST_CHECK_CHANNEL_VA_SPACE                  UVM_TEST_IOCTL_BASE(59)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In
    NvS32                           rm_ctrl_fd;                                         // In
    NvHandle                        client;                                             // In
    NvHandle                        channel;                                            // In
    NvU32                           ve_id;                                              // In
    NvS32                           va_space_fd;                                        // In
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_CHECK_CHANNEL_VA_SPACE_PARAMS;

//
// UvmTestEnableNvlinkPeerAccess
//
// Test ioctl: enable NVLINK peer access between the two given GPUs.
#define UVM_TEST_ENABLE_NVLINK_PEER_ACCESS               UVM_TEST_IOCTL_BASE(60)
typedef struct
{
    NvProcessorUuid gpuUuidA; // IN
    NvProcessorUuid gpuUuidB; // IN
    NV_STATUS  rmStatus; // OUT
} UVM_TEST_ENABLE_NVLINK_PEER_ACCESS_PARAMS;

//
// UvmTestDisableNvlinkPeerAccess
//
// Test ioctl: disable NVLINK peer access between the two given GPUs.
#define UVM_TEST_DISABLE_NVLINK_PEER_ACCESS              UVM_TEST_IOCTL_BASE(61)
typedef struct
{
    NvProcessorUuid gpuUuidA; // IN
    NvProcessorUuid gpuUuidB; // IN
    NV_STATUS  rmStatus; // OUT
} UVM_TEST_DISABLE_NVLINK_PEER_ACCESS_PARAMS;
1031 
// Enable/disable switch for the page thrashing mitigation policy; _MAX bounds
// valid values.
typedef enum
{
    UVM_TEST_PAGE_THRASHING_POLICY_ENABLE = 0,
    UVM_TEST_PAGE_THRASHING_POLICY_DISABLE,
    UVM_TEST_PAGE_THRASHING_POLICY_MAX
} UVM_TEST_PAGE_THRASHING_POLICY;

// This ioctl returns the thrashing mitigation parameters on the current VA
// space. Note that these values may change after a simulated/emulated GPU is
// registered on the VA space.
#define UVM_TEST_GET_PAGE_THRASHING_POLICY               UVM_TEST_IOCTL_BASE(62)
typedef struct
{
    NvU32                           policy;                                             // Out (UVM_TEST_PAGE_THRASHING_POLICY)
    NvU64                           nap_ns                           NV_ALIGN_BYTES(8); // Out
    NvU64                           pin_ns                           NV_ALIGN_BYTES(8); // Out
    NvBool                          map_remote_on_native_atomics_fault;                 // Out
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_GET_PAGE_THRASHING_POLICY_PARAMS;

// Test ioctl: set the thrashing mitigation policy and pin duration.
#define UVM_TEST_SET_PAGE_THRASHING_POLICY               UVM_TEST_IOCTL_BASE(63)
typedef struct
{
    NvU32                           policy;                                             // In (UVM_TEST_PAGE_THRASHING_POLICY)
    NvU64                           pin_ns                           NV_ALIGN_BYTES(8); // In
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_SET_PAGE_THRASHING_POLICY_PARAMS;
1059 
// Test ioctl: PMM sysmem test over two caller-provided range addresses.
#define UVM_TEST_PMM_SYSMEM                              UVM_TEST_IOCTL_BASE(64)
typedef struct
{
    NvU64                           range_address1                   NV_ALIGN_BYTES(8); // In
    NvU64                           range_address2                   NV_ALIGN_BYTES(8); // In
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_PMM_SYSMEM_PARAMS;

// Test ioctl: PMM reverse map test over two ranges on the given GPU; the
// second range's size is caller-provided.
#define UVM_TEST_PMM_REVERSE_MAP                         UVM_TEST_IOCTL_BASE(65)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In
    NvU64                           range_address1                   NV_ALIGN_BYTES(8); // In
    NvU64                           range_address2                   NV_ALIGN_BYTES(8); // In
    NvU64                           range_size2                      NV_ALIGN_BYTES(8); // In
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_PMM_REVERSE_MAP_PARAMS;

// Test ioctl with no inputs; only the resulting status is reported.
#define UVM_TEST_PMM_INDIRECT_PEERS                      UVM_TEST_IOCTL_BASE(66)
typedef struct
{
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_PMM_INDIRECT_PEERS_PARAMS;
1083 
// Calls uvm_va_space_mm_retain on a VA space, operates on the mm, optionally
// sleeps for a while, then releases the va_space_mm and returns. The idea is to
// simulate retaining a va_space_mm from a thread like the GPU fault handler
// which operates outside of the normal context of the VA space.
#define UVM_TEST_VA_SPACE_MM_RETAIN                      UVM_TEST_IOCTL_BASE(67)
typedef struct
{
    // The kernel virtual address of the uvm_va_space on which to attempt
    // retain. This can be obtained via UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS.
    //
    // The reason to use this instead of looking it up from an fd as normal is
    // to allow testing of calling threads which race with UVM VA space destroy
    // (file close). We wouldn't be able to test that path if this was an fd.
    NvU64 va_space_ptr                                               NV_ALIGN_BYTES(8); // In

    // User virtual address within the va_space_mm. If the va_space_mm is
    // successfully retained, this address is read once before sleeping and once
    // after (if sleep_us > 0).
    NvU64 addr                                                       NV_ALIGN_BYTES(8); // In

    // On success, this contains the value of addr read prior to the sleep.
    NvU64 val_before                                                 NV_ALIGN_BYTES(8); // Out

    // On success, and if sleep_us > 0, this contains the value of addr read
    // after the sleep. This is invalid if sleep_us == 0.
    NvU64 val_after                                                  NV_ALIGN_BYTES(8); // Out

    // Approximate duration for which to sleep with the va_space_mm retained.
    NvU64 sleep_us                                                   NV_ALIGN_BYTES(8); // In

    // NV_ERR_MISSING_TABLE_ENTRY   va_space_ptr is not a valid VA space
    // NV_ERR_PAGE_TABLE_NOT_AVAIL  Could not retain va_space_mm
    //                              (uvm_va_space_mm_retain returned NULL)
    // NV_ERR_INVALID_ADDRESS       addr is invalid in va_space_mm
    NV_STATUS rmStatus;                                                                 // Out
} UVM_TEST_VA_SPACE_MM_RETAIN_PARAMS;
1120 
// Test ioctl with no inputs; only the resulting status is reported.
// NOTE(review): ioctl number 68 is unused in this file; 69 follows 67.
#define UVM_TEST_PMM_CHUNK_WITH_ELEVATED_PAGE            UVM_TEST_IOCTL_BASE(69)
typedef struct
{
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_PMM_CHUNK_WITH_ELEVATED_PAGE_PARAMS;

// Test ioctl: query a GPU timestamp.
#define UVM_TEST_GET_GPU_TIME                            UVM_TEST_IOCTL_BASE(70)
typedef struct
{
    // GPU to query time from. GPU must have been previously registered
    NvProcessorUuid                 gpu_uuid;                                           // In

    NvU64                           timestamp_ns                     NV_ALIGN_BYTES(8); // Out
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_GET_GPU_TIME_PARAMS;

// Check if access counters are enabled upon registration of the given GPU
#define UVM_TEST_ACCESS_COUNTERS_ENABLED_BY_DEFAULT      UVM_TEST_IOCTL_BASE(71)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In
    NvBool                          enabled;                                            // Out

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_ACCESS_COUNTERS_ENABLED_BY_DEFAULT_PARAMS;
1146 
// Inject an error into the VA space
//
// If migrate_vma_allocation_fail_nth is greater than 0, the nth page
// allocation within migrate_vma will fail.
//
// If va_block_allocation_fail_nth is greater than 0, the nth call to
// uvm_va_block_find_create() will fail with NV_ERR_NO_MEMORY.
#define UVM_TEST_VA_SPACE_INJECT_ERROR                   UVM_TEST_IOCTL_BASE(72)
typedef struct
{
    NvU32                           migrate_vma_allocation_fail_nth;                    // In
    NvU32                           va_block_allocation_fail_nth;                       // In

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_VA_SPACE_INJECT_ERROR_PARAMS;

// Release to PMA all free root chunks
#define UVM_TEST_PMM_RELEASE_FREE_ROOT_CHUNKS            UVM_TEST_IOCTL_BASE(73)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_PMM_RELEASE_FREE_ROOT_CHUNKS_PARAMS;
1171 
// Wait until all pending replayable faults have been processed. If there are
// still pending packets when timeout_ns is reached, the ioctl returns
// NV_ERR_TIMEOUT.
//
// This function should be called after the kernel producing the faults has been
// synchronized. This should ensure that PUT != GET and faults will not be
// missed even if the driver has not started to process them, yet.
#define UVM_TEST_DRAIN_REPLAYABLE_FAULTS                 UVM_TEST_IOCTL_BASE(74)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In
    // NOTE(review): NvU64 without NV_ALIGN_BYTES(8), unlike the other 64-bit
    // fields in this file — confirm the resulting layout is intended.
    NvU64                           timeout_ns;                                         // In

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_DRAIN_REPLAYABLE_FAULTS_PARAMS;
1187 
1188 // Get module config PMA batch size in bytes
1189 #define UVM_TEST_PMA_GET_BATCH_SIZE                      UVM_TEST_IOCTL_BASE(75)
1190 typedef struct
1191 {
1192     NvProcessorUuid                 gpu_uuid;                                           // In
1193     NvU64                           pma_batch_size;     NV_ALIGN_BYTES(8)               // Out
1194 
1195     NV_STATUS                       rmStatus;                                           // Out
1196 } UVM_TEST_PMA_GET_BATCH_SIZE_PARAMS;
1197 
// Request PMA's global statistics
#define UVM_TEST_PMM_QUERY_PMA_STATS                     UVM_TEST_IOCTL_BASE(76)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In
    UvmPmaStatistics                pma_stats;                                          // Out

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_PMM_QUERY_PMA_STATS_PARAMS;

// Test whether the bottom halves have run on the correct CPUs based on the
// NUMA node locality of the GPU.
//
// Failure is reported if:
//   1. The GPU has serviced faults but the mask tracking which CPUs the
//      bottom half ran on was empty, or
//   2. The set of CPUs where the bottom half ran is not a subset of the CPUs
//      attached to the NUMA node.
//
// This IOCTL returns NV_OK on success, NV_ERR_INVALID_STATE on failure, or
// NV_ERR_NOT_SUPPORTED if UVM thread affinity is not supported.
// NOTE(review): ioctl number 77 is unused in this file; 78 follows 76.
#define UVM_TEST_NUMA_CHECK_AFFINITY                     UVM_TEST_IOCTL_BASE(78)
typedef struct
{
    NvProcessorUuid                 gpu_uuid;                                           // In

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_NUMA_CHECK_AFFINITY_PARAMS;
1226 
// Test ioctl: populate the VA space with dummy thread contexts.
#define UVM_TEST_VA_SPACE_ADD_DUMMY_THREAD_CONTEXTS      UVM_TEST_IOCTL_BASE(79)
typedef struct
{
    // Number of thread contexts to add per thread context table entry
    NvU32                           num_dummy_thread_contexts;                          // In

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_VA_SPACE_ADD_DUMMY_THREAD_CONTEXTS_PARAMS;

// Test ioctl: remove the dummy thread contexts added by the ioctl above.
#define UVM_TEST_VA_SPACE_REMOVE_DUMMY_THREAD_CONTEXTS   UVM_TEST_IOCTL_BASE(80)
typedef struct
{
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_VA_SPACE_REMOVE_DUMMY_THREAD_CONTEXTS_PARAMS;

// Test ioctl: thread context sanity test, repeated for the given iterations.
#define UVM_TEST_THREAD_CONTEXT_SANITY                   UVM_TEST_IOCTL_BASE(81)
typedef struct
{
    // Iterations to run.
    NvU32                           iterations;                                         // In

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_THREAD_CONTEXT_SANITY_PARAMS;

// Test ioctl: measure thread context add/remove performance.
#define UVM_TEST_THREAD_CONTEXT_PERF                     UVM_TEST_IOCTL_BASE(82)
typedef struct
{
    // Iterations to run.
    NvU32                           iterations;                                         // In

    // Delay, in microseconds, between thread context addition and removal
    NvU32                           delay_us;                                           // In

    // Median time, in nanoseconds, spent in adding and then deleting a thread
    // context.
    NvU64                           ns NV_ALIGN_BYTES(8);                               // Out

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_THREAD_CONTEXT_PERF_PARAMS;
1266 
// How (and whether) pageable memory can be accessed in the current VA space.
typedef enum
{
    UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_NONE = 0,

    // Pageable memory cannot be accessed, but there is an association between
    // this VA space and its owning process. For example, this enables the GPU
    // fault handler to establish CPU mappings.
    UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_MMU_NOTIFIER,

    UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_HMM,
    UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_ATS_KERNEL,
    UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_ATS_DRIVER,
    UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_COUNT
} UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE;

// Test ioctl: query the pageable memory access type of the VA space.
#define UVM_TEST_GET_PAGEABLE_MEM_ACCESS_TYPE            UVM_TEST_IOCTL_BASE(83)
typedef struct
{
    // UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE
    NvU32                           type;                                               // Out

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_GET_PAGEABLE_MEM_ACCESS_TYPE_PARAMS;
1290 
// Some events, like fault replays, may not immediately show up in the events
// queue despite calling UVM_TOOLS_FLUSH_EVENTS since that will only flush
// completed events but not pending events. Successful completion of this IOCTL
// guarantees that any replays issued on the given GPU prior to the call will
// have its event enqueued in all the tools sessions which have replay events
// enabled. Also, this IOCTL includes an implicit UVM_TOOLS_FLUSH_EVENTS call.
// Hence, this IOCTL is a superset of UVM_TOOLS_FLUSH_EVENTS. Since this call is
// more expensive than UVM_TOOLS_FLUSH_EVENTS, callers who don't need the above
// mentioned guarantee should consider calling UVM_TOOLS_FLUSH_EVENTS instead.
#define UVM_TEST_TOOLS_FLUSH_REPLAY_EVENTS               UVM_TEST_IOCTL_BASE(84)
typedef struct
{
    NvProcessorUuid                 gpuUuid;                                            // In

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_TOOLS_FLUSH_REPLAY_EVENTS_PARAMS;
1307 
// Many checks are performed when the driver is unloaded. In the event of an
// error, a warning message may be printed to the kernel log. In automated
// testing, a systematic way to check the state of the driver after it is
// unloaded is required for additional test coverage. One userland process may
// register to receive the driver state after its unload, since we cannot use
// /proc or /sys to retrieve driver-specific information for an unloaded driver.
// Any userland process registers the given address (unload_state_buf) with the
// UVM driver. On module unload, if an address has been registered, debugging
// state is written to that address. The data in the address is valid once
// module unload completes.
// Error returns:
// NV_ERR_IN_USE
//  - The unload state buffer has already been registered.
// NV_ERR_INVALID_ADDRESS
//  - unload_state_buf is invalid.
//  - unload_state_buf is not 8-byte aligned.

#define UVM_TEST_REGISTER_UNLOAD_STATE_BUFFER            UVM_TEST_IOCTL_BASE(85)

// Unload debugging states:
#define UVM_TEST_UNLOAD_STATE_MEMORY_LEAK        ((NvU64)0x1)

typedef struct
{
    // unload_state_buf points to a 8-byte buf and must be aligned to 8 bytes.
    NvU64                           unload_state_buf;                                   // In

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_REGISTER_UNLOAD_STATE_BUFFER_PARAMS;

// Test ioctl with no inputs; only the resulting status is reported.
#define UVM_TEST_RB_TREE_DIRECTED                        UVM_TEST_IOCTL_BASE(86)

typedef struct
{
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_RB_TREE_DIRECTED_PARAMS;
1344 
// Test ioctl: randomized red-black tree test driven by the seed and limits
// below.
#define UVM_TEST_RB_TREE_RANDOM                          UVM_TEST_IOCTL_BASE(87)

typedef struct
{
    NvU64                           iterations                       NV_ALIGN_BYTES(8); // In

    // Upper key range bound. Randomly generated node keys will not exceed this
    // value.
    // NOTE(review): NvU64 without NV_ALIGN_BYTES(8) unlike iterations above;
    // it is naturally 8-byte aligned at this offset, but confirm consistency.
    NvU64                           range_max;                                          // In

    // This parameter is used to control the size of the tree.
    // The number of nodes in the tree will bounce between 0 and this limit.
    // See uvm_rb_tree_test.c:rbtt_test_get_random_op() for full description.
    NvU32                           node_limit;                                         // In
    NvU32                           seed;                                               // In

    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_RB_TREE_RANDOM_PARAMS;

// Test ioctl with no inputs; only the resulting status is reported.
#define UVM_TEST_HOST_SANITY                             UVM_TEST_IOCTL_BASE(88)
typedef struct
{
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_HOST_SANITY_PARAMS;
1369 
// Calls uvm_va_space_mm_or_current_retain() on a VA space,
// then releases the va_space_mm and returns.
#define UVM_TEST_VA_SPACE_MM_OR_CURRENT_RETAIN           UVM_TEST_IOCTL_BASE(89)
typedef struct
{
    // User address of a flag to act as a semaphore. If non-NULL, the address
    // is set to 1 after successful retain but before the sleep.
    NvU64 retain_done_ptr                                            NV_ALIGN_BYTES(8); // In

    // Approximate duration for which to sleep with the va_space_mm retained.
    NvU64 sleep_us                                                   NV_ALIGN_BYTES(8); // In

    // NV_ERR_PAGE_TABLE_NOT_AVAIL  Could not retain va_space_mm
    //                              (uvm_va_space_mm_or_current_retain returned
    //                              NULL)
    NV_STATUS rmStatus;                                                                 // Out
} UVM_TEST_VA_SPACE_MM_OR_CURRENT_RETAIN_PARAMS;

// Test ioctl: query the end of the user virtual address space.
#define UVM_TEST_GET_USER_SPACE_END_ADDRESS              UVM_TEST_IOCTL_BASE(90)
typedef struct
{
    NvU64                           user_space_end_address;                             // Out
    NV_STATUS                       rmStatus;                                           // Out
} UVM_TEST_GET_USER_SPACE_END_ADDRESS_PARAMS;
1394 
1395 #define UVM_TEST_GET_CPU_CHUNK_ALLOC_SIZES               UVM_TEST_IOCTL_BASE(91)
1396 typedef struct
1397 {
1398     NvU32                           alloc_size_mask;                                    // Out
1399     NvU32                           rmStatus;                                           // Out
1400 } UVM_TEST_GET_CPU_CHUNK_ALLOC_SIZES_PARAMS;
1401 
// Forces the next range covering the lookup_address to fail in
// uvm_va_range_add_gpu_va_space() with an out-of-memory error. Only the next
// uvm_va_range_add_gpu_va_space() will fail. Subsequent ones will succeed.
//
// Error returns:
// NV_ERR_INVALID_ADDRESS
//  - lookup_address doesn't match a UVM range
//
// NOTE(review): ioctl number 92 is skipped in this header — presumably
// reserved or retired; confirm before reusing it.
#define UVM_TEST_VA_RANGE_INJECT_ADD_GPU_VA_SPACE_ERROR  UVM_TEST_IOCTL_BASE(93)
typedef struct
{
    // Address used to look up the UVM range that will have the error injected.
    NvU64     lookup_address NV_ALIGN_BYTES(8);          // In
    NV_STATUS rmStatus;                                  // Out
} UVM_TEST_VA_RANGE_INJECT_ADD_GPU_VA_SPACE_ERROR_PARAMS;
1415 
// Forces destroy_gpu_va_space() to delay execution. This provides a high
// probability of exercising the race condition between concurrent
// UvmRegisterGpuVaSpace() calls on the same {va_space, gpu} pair in the
// ATS_KERNEL case.
#define UVM_TEST_DESTROY_GPU_VA_SPACE_DELAY              UVM_TEST_IOCTL_BASE(94)
typedef struct
{
    // Duration of the injected delay, in microseconds.
    // NOTE(review): this NvU64 lacks the NV_ALIGN_BYTES(8) annotation used on
    // other 64-bit fields in this header — confirm 32-bit/64-bit userspace
    // layout compatibility.
    NvU64 delay_us;                                      // In
    NV_STATUS rmStatus;                                  // Out
} UVM_TEST_DESTROY_GPU_VA_SPACE_DELAY_PARAMS;
1426 
// Runs the SEC2 engine sanity test. Takes no inputs; the only result is the
// pass/fail status.
#define UVM_TEST_SEC2_SANITY                             UVM_TEST_IOCTL_BASE(95)
typedef struct
{
    NV_STATUS rmStatus;                                  // Out
} UVM_TEST_SEC2_SANITY_PARAMS;
1432 
// Reports whether cgroup accounting is supported. The result is conveyed
// solely through rmStatus.
// NOTE(review): the specific status code used for "unsupported" is defined by
// the kernel-side handler — confirm against the driver implementation.
#define UVM_TEST_CGROUP_ACCOUNTING_SUPPORTED             UVM_TEST_IOCTL_BASE(96)
typedef struct
{
    NV_STATUS rmStatus;                                  // Out
} UVM_TEST_CGROUP_ACCOUNTING_SUPPORTED_PARAMS;
1438 
// Injects a delay into the split/invalidate path for race-condition testing.
// NOTE(review): ioctl number 97 is skipped in this header — presumably
// reserved or retired; confirm before reusing it.
#define UVM_TEST_SPLIT_INVALIDATE_DELAY                  UVM_TEST_IOCTL_BASE(98)
typedef struct
{
    // Duration of the injected delay, in microseconds.
    // NOTE(review): lacks NV_ALIGN_BYTES(8) unlike other 64-bit fields in this
    // header — confirm 32-bit/64-bit userspace layout compatibility.
    NvU64 delay_us;                                      // In
    NV_STATUS rmStatus;                                  // Out
} UVM_TEST_SPLIT_INVALIDATE_DELAY_PARAMS;
1445 
// Tests the CSL/SEC2 encryption/decryption methods by doing a secure transfer
// of memory from CPU->GPU and a subsequent GPU->CPU transfer. Takes no inputs;
// the only result is the pass/fail status.
#define UVM_TEST_SEC2_CPU_GPU_ROUNDTRIP                  UVM_TEST_IOCTL_BASE(99)
typedef struct
{
    NV_STATUS rmStatus;                                  // Out
} UVM_TEST_SEC2_CPU_GPU_ROUNDTRIP_PARAMS;
1453 
// Exercises the kernel-side CPU chunk API test. Takes no inputs; the only
// result is the pass/fail status.
#define UVM_TEST_CPU_CHUNK_API                           UVM_TEST_IOCTL_BASE(100)
typedef struct
{
    NV_STATUS rmStatus;                                  // Out
} UVM_TEST_CPU_CHUNK_API_PARAMS;
1459 
// Toggles whether CPU-to-CPU copies are forced through a GPU Copy Engine (CE)
// rather than the default copy path.
// NOTE(review): semantics inferred from the name — confirm against the
// kernel-side handler.
#define UVM_TEST_FORCE_CPU_TO_CPU_COPY_WITH_CE          UVM_TEST_IOCTL_BASE(101)
typedef struct
{
    // NV_TRUE to force CE-based copies, NV_FALSE to restore default behavior.
    NvBool force_copy_with_ce;                          // In
    NV_STATUS rmStatus;                                 // Out
} UVM_TEST_FORCE_CPU_TO_CPU_COPY_WITH_CE_PARAMS;
1466 
// Toggles whether the VA space permits movable allocations.
// NOTE(review): semantics inferred from the name — confirm against the
// kernel-side handler.
#define UVM_TEST_VA_SPACE_ALLOW_MOVABLE_ALLOCATIONS     UVM_TEST_IOCTL_BASE(102)
typedef struct
{
    // NV_TRUE to allow movable allocations in the VA space.
    NvBool allow_movable;                               // In
    NV_STATUS rmStatus;                                 // Out
} UVM_TEST_VA_SPACE_ALLOW_MOVABLE_ALLOCATIONS_PARAMS;
1473 
// Toggles skipping of the migrate_vma path.
// NOTE(review): semantics inferred from the name — confirm against the
// kernel-side handler.
#define UVM_TEST_SKIP_MIGRATE_VMA                        UVM_TEST_IOCTL_BASE(103)
typedef struct
{
    // NV_TRUE to skip the migrate_vma path.
    NvBool skip;                                         // In
    NV_STATUS rmStatus;                                  // Out
} UVM_TEST_SKIP_MIGRATE_VMA_PARAMS;
1480 
1481 #ifdef __cplusplus
1482 }
1483 #endif
1484 
1485 #endif // __UVM_TEST_IOCTL_H__
1486