/*
 * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

//
// This file provides common types for both the UVM driver and RM's UVM
// interface.
//

#ifndef _NV_UVM_TYPES_H_
#define _NV_UVM_TYPES_H_

#include "nvtypes.h"
#include "nvstatus.h"
#include "nvgputypes.h"
#include "nvCpuUuid.h"


//
// The default page size is left at "0" because the BIG page size is the
// default in RM and there are multiple BIG page sizes in RM. These defines are
// used as flags, so "0" is fine when the caller does not care which page size
// the allocation uses.
//
#define UVM_PAGE_SIZE_DEFAULT    0x0
#define UVM_PAGE_SIZE_4K         0x1000
#define UVM_PAGE_SIZE_64K        0x10000
#define UVM_PAGE_SIZE_128K       0x20000
#define UVM_PAGE_SIZE_2M         0x200000
#define UVM_PAGE_SIZE_512M       0x20000000

//
// When modifying flags, make sure they are compatible with the mirrored
// PMA_* flags in phys_mem_allocator.h.
//
// Input flags
#define UVM_PMA_ALLOCATE_DONT_EVICT             NVBIT(0)
#define UVM_PMA_ALLOCATE_PINNED                 NVBIT(1)
#define UVM_PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED  NVBIT(2)
#define UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE  NVBIT(3)
#define UVM_PMA_ALLOCATE_SPECIFY_REGION_ID      NVBIT(4)
#define UVM_PMA_ALLOCATE_PREFER_SLOWEST         NVBIT(5)
#define UVM_PMA_ALLOCATE_CONTIGUOUS             NVBIT(6)
#define UVM_PMA_ALLOCATE_PERSISTENT             NVBIT(7)
#define UVM_PMA_ALLOCATE_PROTECTED_REGION       NVBIT(8)
#define UVM_PMA_ALLOCATE_FORCE_ALIGNMENT        NVBIT(9)
#define UVM_PMA_ALLOCATE_NO_ZERO                NVBIT(10)
#define UVM_PMA_ALLOCATE_TURN_BLACKLIST_OFF     NVBIT(11)
#define UVM_PMA_ALLOCATE_ALLOW_PARTIAL          NVBIT(12)

// Output flags
#define UVM_PMA_ALLOCATE_RESULT_IS_ZERO         NVBIT(0)

// Input flags to pmaFree
#define UVM_PMA_FREE_IS_ZERO                    NVBIT(0)

//
// Indicate that the PMA operation is being done from one of the PMA eviction
// callbacks.
//
// Notably this flag is currently used only by the UVM/RM interface and not
// mirrored in PMA.
//
#define UVM_PMA_CALLED_FROM_PMA_EVICTION        16384

#define UVM_UUID_LEN 16
#define UVM_SW_OBJ_SUBCHANNEL 5

typedef unsigned long long UvmGpuPointer;

//
// The following typedefs serve to explain the resources they point to.
// The actual resources remain internal to RM and are not exposed.
//
typedef struct uvmGpuSession_tag       *uvmGpuSessionHandle;       // gpuSessionHandle
typedef struct uvmGpuDevice_tag        *uvmGpuDeviceHandle;        // gpuDeviceHandle
typedef struct uvmGpuAddressSpace_tag  *uvmGpuAddressSpaceHandle;  // gpuAddressSpaceHandle
typedef struct uvmGpuTsg_tag           *uvmGpuTsgHandle;           // gpuTsgHandle
typedef struct uvmGpuChannel_tag       *uvmGpuChannelHandle;       // gpuChannelHandle
typedef struct uvmGpuCopyEngine_tag    *uvmGpuCopyEngineHandle;    // gpuObjectHandle

typedef struct UvmGpuMemoryInfo_tag
{
    // Out: Memory layout.
    NvU32 kind;

    // Out: Set to NV_TRUE if the allocation is in sysmem.
    NvBool sysmem;

    // Out: Set to NV_TRUE if the allocation was constructed
    //      under a Device or Subdevice.
    //      All permutations of sysmem and deviceDescendant are valid.
    //      !sysmem && !deviceDescendant implies a fabric allocation.
    NvBool deviceDescendant;

    // Out: Page size associated with the phys alloc.
    NvU64 pageSize;

    // Out: Set to NV_TRUE if the allocation is contiguous.
    NvBool contig;

    // Out: Starting address of the allocation if it is contiguous.
    //      This is only valid if contig is NV_TRUE.
    NvU64 physAddr;

    // Out: Total size of the allocation.
    NvU64 size;

    // Out: UUID of the GPU to which the allocation belongs.
    //      This is only valid if deviceDescendant is NV_TRUE.
    //      Note: If the allocation is owned by a device in
    //      an SLI group and the allocation is broadcast
    //      across the SLI group, this UUID will be that of
    //      any one of the subdevices in the SLI group.
    NvProcessorUuid uuid;
} UvmGpuMemoryInfo;
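
//
// Illustrative sketch (not part of this interface): deriving the physical
// address of an offset into an allocation described by UvmGpuMemoryInfo. Only
// the contiguous case can be resolved from this struct alone; the helper name
// is hypothetical.
//
//     static NV_STATUS exampleGetPhysAddr(const UvmGpuMemoryInfo *info,
//                                         NvU64 offset,
//                                         NvU64 *pPhysAddr)
//     {
//         if (offset >= info->size)
//             return NV_ERR_INVALID_ARGUMENT;
//
//         // physAddr is only meaningful for contiguous allocations.
//         if (!info->contig)
//             return NV_ERR_NOT_SUPPORTED;
//
//         *pPhysAddr = info->physAddr + offset;
//         return NV_OK;
//     }
//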

// Some resources must share the same virtual mappings across channels. A mapped
// resource must be shared by a channel iff:
//
// 1) The channel belongs to a TSG (UvmGpuChannelInstanceInfo::bTsgChannel is
//    NV_TRUE).
//
// 2) The channel is in the same TSG as all other channels sharing that mapping
//    (UvmGpuChannelInstanceInfo::tsgId matches among channels).
//
// 3) The channel is in the same GPU address space as the other channels
//    sharing that mapping.
//
// 4) The resource handle(s) match those of the shared mapping
//    (UvmGpuChannelResourceInfo::resourceDescriptor and
//    UvmGpuChannelResourceInfo::resourceId).
//
// A sketch of this check follows UvmGpuChannelInstanceInfo below.
typedef struct UvmGpuChannelResourceInfo_tag
{
    // Out: Ptr to the RM memDesc of the channel resource.
    NvP64 resourceDescriptor;

    // Out: RM ID of the channel resource.
    NvU32 resourceId;

    // Out: Alignment needed for the resource allocation.
    NvU64 alignment;

    // Out: Info about the resource allocation.
    UvmGpuMemoryInfo resourceInfo;
} UvmGpuChannelResourceInfo;

typedef struct UvmGpuPagingChannelInfo_tag
{
    // Pointer to a shadow buffer mirroring the contents of the error notifier
    // for the paging channel.
    NvNotification    *shadowErrorNotifier;
} UvmGpuPagingChannelInfo;

typedef enum
{
    UVM_GPU_CHANNEL_ENGINE_TYPE_GR = 1,
    UVM_GPU_CHANNEL_ENGINE_TYPE_CE = 2,
    UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2 = 3,
} UVM_GPU_CHANNEL_ENGINE_TYPE;

#define UVM_GPU_CHANNEL_MAX_RESOURCES   13

typedef struct UvmGpuChannelInstanceInfo_tag
{
    // Out: Starting address of the channel instance.
    NvU64 base;

    // Out: Set to NV_TRUE if the instance is in sysmem.
    //      Set to NV_FALSE if the instance is in vidmem.
    NvBool sysmem;

    // Out: Hardware runlist ID.
    NvU32 runlistId;

    // Out: Hardware channel ID.
    NvU32 chId;

    // Out: NV_TRUE if the channel belongs to a subcontext or NV_FALSE if it
    // belongs to a regular context.
    NvBool bInSubctx;

    // Out: ID of the subcontext to which the channel belongs.
    NvU32 subctxId;

    // Out: Whether the channel belongs to a TSG or not.
    NvBool bTsgChannel;

    // Out: ID of the TSG to which the channel belongs.
    NvU32 tsgId;

    // Out: Maximum number of subcontexts in the TSG to which the channel belongs.
    NvU32 tsgMaxSubctxCount;

    // Out: Info of channel resources associated with the channel.
    UvmGpuChannelResourceInfo resourceInfo[UVM_GPU_CHANNEL_MAX_RESOURCES];

    // Out: Number of valid entries in the resourceInfo array.
    NvU32 resourceCount;

    // Out: Type of the engine the channel is bound to.
    NvU32 channelEngineType;

    // Out: Channel handle to be used in the CLEAR_FAULTED method.
    NvU32 clearFaultedToken;

    // Out: Address of the NV_CHRAM_CHANNEL register required to clear the
    // ENG_FAULTED/PBDMA_FAULTED bits after servicing non-replayable faults on
    // Ampere+ GPUs.
    volatile NvU32 *pChramChannelRegister;

    // Out: Address of the Runlist PRI Base Register required to ring the
    // doorbell after clearing the faulted bit.
    volatile NvU32 *pRunlistPRIBaseRegister;

    // Out: SMC engine ID to which the GR channel is bound, or zero if the GPU
    // does not support SMC or it is a CE channel.
    NvU32 smcEngineId;

    // Out: Start of the VEID range assigned to the SMC engine the GR channel
    // is bound to, or zero if the GPU does not support SMC or it is a CE
    // channel.
    NvU32 smcEngineVeIdOffset;
} UvmGpuChannelInstanceInfo;
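
//
// Illustrative sketch (not part of this interface) of the mapping-sharing
// rules documented above UvmGpuChannelResourceInfo. The GPU address space
// comparison (rule 3) is represented by the caller passing each channel's
// uvmGpuAddressSpaceHandle; the function name is hypothetical.
//
//     static NvBool exampleCanShareMapping(const UvmGpuChannelInstanceInfo *chanA,
//                                          const UvmGpuChannelResourceInfo *resA,
//                                          uvmGpuAddressSpaceHandle vaSpaceA,
//                                          const UvmGpuChannelInstanceInfo *chanB,
//                                          const UvmGpuChannelResourceInfo *resB,
//                                          uvmGpuAddressSpaceHandle vaSpaceB)
//     {
//         return chanA->bTsgChannel && chanB->bTsgChannel &&             // 1)
//                chanA->tsgId == chanB->tsgId &&                         // 2)
//                vaSpaceA == vaSpaceB &&                                 // 3)
//                resA->resourceDescriptor == resB->resourceDescriptor && // 4)
//                resA->resourceId == resB->resourceId;
//     }
//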

typedef struct UvmGpuChannelResourceBindParams_tag
{
    // In: RM ID of the channel resource.
    NvU32 resourceId;

    // In: Starting VA at which the channel resource is mapped.
    NvU64 resourceVa;
} UvmGpuChannelResourceBindParams;

typedef struct UvmGpuChannelInfo_tag
{
    volatile unsigned *gpGet;
    volatile unsigned *gpPut;
    UvmGpuPointer     *gpFifoEntries;
    unsigned           numGpFifoEntries;
    unsigned           channelClassNum;

    // The errorNotifier is filled out when the channel hits an RC error.
    NvNotification    *errorNotifier;

    NvU32              hwRunlistId;
    NvU32              hwChannelId;

    volatile unsigned *dummyBar1Mapping;

    // These values are filled by nvUvmInterfaceCopyEngineAlloc. The work
    // submission token requires the channel to be bound to a runlist, and that
    // happens after CE allocation.
    volatile NvU32    *workSubmissionOffset;

    // To be deprecated. See pWorkSubmissionToken below.
    NvU32              workSubmissionToken;

    //
    // This is the memory location where the most recently updated work
    // submission token for this channel will be written to. After submitting
    // new work and updating GP_PUT with the appropriate fence, the token must
    // be read from this location before writing it to the workSubmissionOffset
    // to kick off the new work.
    //
    volatile NvU32    *pWorkSubmissionToken;

    // GPU VAs of both GPFIFO and GPPUT are needed in Confidential Computing,
    // so a channel can be controlled via another channel (SEC2 or WLC/LCIC).
    NvU64             gpFifoGpuVa;
    NvU64             gpPutGpuVa;
    // The GPU VA of the work submission offset is needed in Confidential
    // Computing, so CE channels can ring the doorbell of other channels as
    // required for WLC/LCIC work submission.
    NvU64             workSubmissionOffsetGpuVa;
} UvmGpuChannelInfo;
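
//
// Illustrative sketch (not part of this interface) of the work submission
// ordering described for pWorkSubmissionToken above: write the GPFIFO entry,
// advance GP_PUT, then read the token and write it to the work submission
// offset. The GPFIFO entry encoding, the barrier primitive and the helper
// names are placeholders, not part of this header.
//
//     static void exampleKickOffWork(UvmGpuChannelInfo *ch,
//                                    UvmGpuPointer pushbufferGpuVa,
//                                    NvU64 pushbufferSizeBytes)
//     {
//         unsigned put = *ch->gpPut;
//
//         // 1) Write the new GPFIFO entry (class-specific encoding omitted).
//         ch->gpFifoEntries[put] = exampleEncodeGpEntry(pushbufferGpuVa,
//                                                       pushbufferSizeBytes);
//
//         // 2) Advance GP_PUT; the entry must be visible before the update.
//         exampleWriteBarrier();
//         *ch->gpPut = (put + 1) % ch->numGpFifoEntries;
//
//         // 3) Only after GP_PUT is updated, read the current token and ring
//         //    the doorbell with it.
//         exampleWriteBarrier();
//         *ch->workSubmissionOffset = *ch->pWorkSubmissionToken;
//     }
//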

typedef enum
{
    // This value must be passed by Pascal and pre-Pascal GPUs for those
    // allocations for which a specific location cannot be enforced.
    UVM_BUFFER_LOCATION_DEFAULT = 0,

    UVM_BUFFER_LOCATION_SYS  = 1,
    UVM_BUFFER_LOCATION_VID  = 2,
} UVM_BUFFER_LOCATION;

typedef struct UvmGpuTsgAllocParams_tag
{
    // Interpreted as UVM_GPU_CHANNEL_ENGINE_TYPE
    NvU32 engineType;

    // Index of the engine the TSG is bound to.
    // Ignored if engineType is anything other than
    // UVM_GPU_CHANNEL_ENGINE_TYPE_CE.
    NvU32 engineIndex;
} UvmGpuTsgAllocParams;

typedef struct UvmGpuChannelAllocParams_tag
{
    NvU32 numGpFifoEntries;

    // The next two fields store UVM_BUFFER_LOCATION values
    NvU32 gpFifoLoc;
    NvU32 gpPutLoc;

    // Allocate the channel as secure. This flag should only be set when
    // Confidential Compute is enabled.
    NvBool secure;
} UvmGpuChannelAllocParams;

typedef struct UvmGpuPagingChannelAllocParams_tag
{
    // Index of the LCE engine the channel will be bound to, a zero-based offset
    // from NV2080_ENGINE_TYPE_COPY0.
    NvU32 engineIndex;
} UvmGpuPagingChannelAllocParams;

// The max number of Copy Engines supported by a GPU.
// The gpu ops build has a static assert that this is the correct number.
#define UVM_COPY_ENGINE_COUNT_MAX 10

typedef struct
{
    // True if the CE is supported at all
    NvBool supported:1;

    // True if the CE is synchronous with GR
    NvBool grce:1;

    // True if the CE shares physical CEs with any other CE
    //
    // The value returned by RM for this field may change when a GPU is
    // registered with RM for the first time, so UVM needs to query it
    // again each time a GPU is registered.
    NvBool shared:1;

    // True if the CE can give enhanced performance for SYSMEM reads over other CEs
    NvBool sysmemRead:1;

    // True if the CE can give enhanced performance for SYSMEM writes over other CEs
    NvBool sysmemWrite:1;

    // True if the CE can be used for SYSMEM transactions
    NvBool sysmem:1;

    // True if the CE can be used for P2P transactions using NVLINK
    NvBool nvlinkP2p:1;

    // True if the CE can be used for P2P transactions
    NvBool p2p:1;

    // True if the CE supports encryption
    NvBool secure:1;

    // Mask of physical CEs assigned to this LCE
    //
    // The value returned by RM for this field may change when a GPU is
    // registered with RM for the first time, so UVM needs to query it
    // again each time a GPU is registered.
    NvU32 cePceMask;
} UvmGpuCopyEngineCaps;

typedef struct UvmGpuCopyEnginesCaps_tag
{
    // Supported CEs may not be contiguous
    UvmGpuCopyEngineCaps copyEngineCaps[UVM_COPY_ENGINE_COUNT_MAX];
} UvmGpuCopyEnginesCaps;
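
//
// Illustrative sketch (not part of this interface): scanning the (possibly
// non-contiguous) caps array for a CE suited to sysmem writes. The selection
// policy shown is an example, not UVM's actual engine-selection logic.
//
//     static NvS32 examplePickSysmemWriteCe(const UvmGpuCopyEnginesCaps *caps)
//     {
//         NvS32 fallback = -1;
//         NvU32 i;
//
//         for (i = 0; i < UVM_COPY_ENGINE_COUNT_MAX; i++) {
//             const UvmGpuCopyEngineCaps *ce = &caps->copyEngineCaps[i];
//
//             if (!ce->supported || !ce->sysmem)
//                 continue;
//             if (ce->sysmemWrite)
//                 return (NvS32)i;     // Preferred: enhanced sysmem writes
//             if (fallback < 0)
//                 fallback = (NvS32)i; // Remember any sysmem-capable CE
//         }
//
//         return fallback;             // -1 if no usable CE was found
//     }
//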

typedef enum
{
    UVM_LINK_TYPE_NONE,
    UVM_LINK_TYPE_PCIE,
    UVM_LINK_TYPE_NVLINK_1,
    UVM_LINK_TYPE_NVLINK_2,
    UVM_LINK_TYPE_NVLINK_3,
    UVM_LINK_TYPE_NVLINK_4,
    UVM_LINK_TYPE_C2C,
} UVM_LINK_TYPE;

typedef struct UvmGpuCaps_tag
{
    // If numaEnabled is NV_TRUE, then the system address of allocated GPU
    // memory can be converted to struct pages. See
    // UvmGpuInfo::systemMemoryWindowStart.
    NvBool   numaEnabled;
    NvU32    numaNodeId;
} UvmGpuCaps;

typedef struct UvmGpuAddressSpaceInfo_tag
{
    NvU64           bigPageSize;

    NvBool          atsEnabled;

    // Mapped registers that contain the current GPU time
    volatile NvU32  *time0Offset;
    volatile NvU32  *time1Offset;

    // Maximum number of subcontexts supported under this GPU address space
    NvU32           maxSubctxCount;

    NvBool          smcEnabled;

    NvU32           smcSwizzId;

    NvU32           smcGpcCount;
} UvmGpuAddressSpaceInfo;

typedef struct UvmGpuAllocInfo_tag
{
    NvU64   gpuPhysOffset;          // Returns the GPU physical offset if contiguous was requested
    NvU64   pageSize;               // Default is the RM big page size (64K or 128K); otherwise use 4K or 2M
    NvU64   alignment;              // Virtual alignment
    NvBool  bContiguousPhysAlloc;   // Flag to request contiguous physical allocation
    NvBool  bMemGrowsDown;          // Causes RM to reserve physical heap from top of FB
    NvBool  bPersistentVidmem;      // Causes RM to allocate persistent video memory
    NvHandle hPhysHandle;           // Handle for the phys allocation, either provided or retrieved
    NvBool   bUnprotected;          // Allocation to be made in unprotected memory whenever
                                    // SEV or GPU CC modes are enabled. Ignored otherwise
} UvmGpuAllocInfo;
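
//
// Illustrative sketch (not part of this interface): requesting a physically
// contiguous allocation backed by 2M pages. Zero-initialization leaves the
// remaining fields (including hPhysHandle) at their defaults; whether these
// values suit a particular allocation path is an assumption.
//
//     UvmGpuAllocInfo allocInfo = {0};
//
//     allocInfo.pageSize             = UVM_PAGE_SIZE_2M;
//     allocInfo.alignment            = UVM_PAGE_SIZE_2M;  // Virtual alignment
//     allocInfo.bContiguousPhysAlloc = NV_TRUE;
//
//     // On success, allocInfo.gpuPhysOffset reports the physical offset of
//     // the contiguous allocation.
//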

typedef enum
{
    UVM_VIRT_MODE_NONE = 0,             // Baremetal or passthrough virtualization
    UVM_VIRT_MODE_LEGACY = 1,           // Virtualization without SRIOV support
    UVM_VIRT_MODE_SRIOV_HEAVY = 2,      // Virtualization with SRIOV Heavy configured
    UVM_VIRT_MODE_SRIOV_STANDARD = 3,   // Virtualization with SRIOV Standard configured
    UVM_VIRT_MODE_COUNT = 4,
} UVM_VIRT_MODE;

// !!! The following enums (with UvmRm prefix) are defined and documented in
// mm/uvm/interface/uvm_types.h and must be mirrored. Please refer to that file
// for more details.

// UVM GPU mapping types
typedef enum
{
    UvmRmGpuMappingTypeDefault = 0,
    UvmRmGpuMappingTypeReadWriteAtomic = 1,
    UvmRmGpuMappingTypeReadWrite = 2,
    UvmRmGpuMappingTypeReadOnly = 3,
    UvmRmGpuMappingTypeCount = 4
} UvmRmGpuMappingType;

// UVM GPU caching types
typedef enum
{
    UvmRmGpuCachingTypeDefault = 0,
    UvmRmGpuCachingTypeForceUncached = 1,
    UvmRmGpuCachingTypeForceCached = 2,
    UvmRmGpuCachingTypeCount = 3
} UvmRmGpuCachingType;

// UVM GPU format types
typedef enum {
   UvmRmGpuFormatTypeDefault = 0,
   UvmRmGpuFormatTypeBlockLinear = 1,
   UvmRmGpuFormatTypeCount = 2
} UvmRmGpuFormatType;

// UVM GPU Element bits types
typedef enum {
   UvmRmGpuFormatElementBitsDefault = 0,
   UvmRmGpuFormatElementBits8 = 1,
   UvmRmGpuFormatElementBits16 = 2,
   // CUDA does not support 24-bit width
   UvmRmGpuFormatElementBits32 = 4,
   UvmRmGpuFormatElementBits64 = 5,
   UvmRmGpuFormatElementBits128 = 6,
   UvmRmGpuFormatElementBitsCount = 7
} UvmRmGpuFormatElementBits;

// UVM GPU Compression types
typedef enum {
    UvmRmGpuCompressionTypeDefault = 0,
    UvmRmGpuCompressionTypeEnabledNoPlc = 1,
    UvmRmGpuCompressionTypeCount = 2
} UvmRmGpuCompressionType;

typedef struct UvmGpuExternalMappingInfo_tag
{
    // In: GPU caching ability.
    UvmRmGpuCachingType cachingType;

    // In: Virtual permissions.
    UvmRmGpuMappingType mappingType;

    // In: RM virtual mapping memory format
    UvmRmGpuFormatType formatType;

    // In: RM virtual mapping element bits
    UvmRmGpuFormatElementBits elementBits;

    // In: RM virtual compression type
    UvmRmGpuCompressionType compressionType;

    // In: Size of the buffer to store PTEs (in bytes).
    NvU64 pteBufferSize;

    // In: Page size for mapping
    //     If this field is passed as 0, the page size
    //     of the allocation is used for mapping.
    //     nvUvmInterfaceGetExternalAllocPtes must pass
    //     this field as zero.
    NvU64 mappingPageSize;

    // In: Pointer to a buffer to store PTEs.
    // Out: The interface will fill the buffer with PTEs.
    NvU64 *pteBuffer;

    // Out: Number of PTEs filled in to the buffer.
    NvU64 numWrittenPtes;

    // Out: Number of PTEs remaining to be filled
    //      if the buffer is not sufficient to accommodate
    //      requested PTEs.
    NvU64 numRemainingPtes;

    // Out: PTE size (in bytes)
    NvU32 pteSize;
} UvmGpuExternalMappingInfo;
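
//
// Illustrative sketch (not part of this interface): sizing and interpreting
// the PTE buffer. The RM call that fills this struct (per the comment above,
// nvUvmInterfaceGetExternalAllocPtes) is elided because its signature is not
// defined in this header; the buffer length is an arbitrary example.
//
//     NvU64 pteStorage[512];
//     UvmGpuExternalMappingInfo mappingInfo = {0};
//
//     mappingInfo.mappingType     = UvmRmGpuMappingTypeReadWriteAtomic;
//     mappingInfo.cachingType     = UvmRmGpuCachingTypeDefault;
//     mappingInfo.mappingPageSize = 0;                  // Use the allocation's page size
//     mappingInfo.pteBuffer       = pteStorage;
//     mappingInfo.pteBufferSize   = sizeof(pteStorage); // In bytes
//
//     // ... call into RM to fill the PTEs ...
//
//     // On return, the first numWrittenPtes entries (each pteSize bytes) are
//     // valid. A non-zero numRemainingPtes means the buffer was too small and
//     // the query must be repeated with a larger buffer.
//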

typedef struct UvmGpuP2PCapsParams_tag
{
    // Out: peerIds[i] contains gpu[i]'s peer id of gpu[1 - i]. Only defined if
    // the GPUs are direct peers.
    NvU32 peerIds[2];

    // Out: UVM_LINK_TYPE
    NvU32 p2pLink;

    // Out: optimalNvlinkWriteCEs[i] contains gpu[i]'s optimal CE for writing to
    // gpu[1 - i]. The CE indexes are valid only if the GPUs are NVLink peers.
    //
    // The value returned by RM for this field may change when a GPU is
    // registered with RM for the first time, so UVM needs to query it again
    // each time a GPU is registered.
    NvU32 optimalNvlinkWriteCEs[2];

    // Out: Maximum unidirectional bandwidth between the peers in megabytes per
    // second, not taking into account the protocol overhead. The reported
    // bandwidth for indirect peers is zero.
    NvU32 totalLinkLineRateMBps;

    // Out: True if the peers have an indirect link to communicate. On P9
    // systems, this is true if peers are connected to different NPUs that
    // forward the requests between them.
    NvU32 indirectAccess      : 1;
} UvmGpuP2PCapsParams;

// Platform-wide information
typedef struct UvmPlatformInfo_tag
{
    // Out: ATS (Address Translation Services) is supported
    NvBool atsSupported;

    // Out: AMD SEV (Secure Encrypted Virtualization) is enabled
    NvBool sevEnabled;
} UvmPlatformInfo;

typedef struct UvmGpuClientInfo_tag
{
    NvHandle hClient;

    NvHandle hSmcPartRef;
} UvmGpuClientInfo;

typedef enum
{
    UVM_GPU_CONF_COMPUTE_MODE_NONE,
    UVM_GPU_CONF_COMPUTE_MODE_APM,
    UVM_GPU_CONF_COMPUTE_MODE_HCC,
    UVM_GPU_CONF_COMPUTE_MODE_COUNT
} UvmGpuConfComputeMode;

typedef struct UvmGpuConfComputeCaps_tag
{
    // Out: GPU's confidential compute mode
    UvmGpuConfComputeMode mode;
} UvmGpuConfComputeCaps;

#define UVM_GPU_NAME_LENGTH 0x40

typedef struct UvmGpuInfo_tag
{
    // Printable gpu name
    char name[UVM_GPU_NAME_LENGTH];

    // Uuid of this gpu
    NvProcessorUuid uuid;

    // Gpu architecture; NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_*
    NvU32 gpuArch;

    // Gpu implementation; NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_*
    NvU32 gpuImplementation;

    // Host (gpfifo) class; *_CHANNEL_GPFIFO_*, e.g. KEPLER_CHANNEL_GPFIFO_A
    NvU32 hostClass;

    // Copy engine (dma) class; *_DMA_COPY_*, e.g. KEPLER_DMA_COPY_A
    NvU32 ceClass;

    // Compute class; *_COMPUTE_*, e.g. KEPLER_COMPUTE_A
    NvU32 computeClass;

    // Set if the GPU supports TCC Mode and is in TCC mode.
    NvBool gpuInTcc;

    // Number of subdevices in the SLI group.
    NvU32 subdeviceCount;

    // Virtualization mode of this gpu.
    NvU32 virtMode;         // UVM_VIRT_MODE

    // NV_TRUE if this is a simulated/emulated GPU, NV_FALSE otherwise.
    NvBool isSimulated;

    // Number of GPCs
    // If SMC is enabled, this is the currently configured number of GPCs for
    // the given partition (also see the smcSwizzId field below).
    NvU32 gpcCount;

    // Maximum number of GPCs; NV_SCAL_LITTER_NUM_GPCS
    // This number is independent of the partition configuration, and can be
    // used to conservatively size GPU-global constructs.
    NvU32 maxGpcCount;

    // Number of TPCs
    NvU32 tpcCount;

    // Maximum number of TPCs per GPC
    NvU32 maxTpcPerGpcCount;

    // NV_TRUE if SMC is enabled on this GPU.
    NvBool smcEnabled;

    // SMC partition ID (unique per GPU); note: valid when first looked up in
    // nvUvmInterfaceGetGpuInfo(), but not guaranteed to remain valid.
    // nvUvmInterfaceDeviceCreate() re-verifies the swizzId and fails if it is
    // no longer valid.
    NvU32 smcSwizzId;

    UvmGpuClientInfo smcUserClientInfo;

    // Confidential Compute capabilities of this GPU
    UvmGpuConfComputeCaps gpuConfComputeCaps;

    // UVM_LINK_TYPE
    NvU32 sysmemLink;

    // See UvmGpuP2PCapsParams::totalLinkLineRateMBps
    NvU32 sysmemLinkRateMBps;

    // On coherent systems each GPU maps its memory to a window in the System
    // Physical Address (SPA) space. The following fields describe that window.
    //
    // systemMemoryWindowSize > 0 indicates that the window is valid, meaning
    // that GPU memory can be mapped by the CPU as cache-coherent by adding the
    // GPU address to the window start.
    NvU64 systemMemoryWindowStart;
    NvU64 systemMemoryWindowSize;

    // This tells whether the GPU is connected to NVSwitch. On systems with
    // NVSwitch all GPUs are connected to it. If connectedToSwitch is NV_TRUE,
    // nvswitchMemoryWindowStart tells the base address for the GPU in the
    // NVSwitch address space. It is used when creating PTEs of memory mappings
    // to NVSwitch peers.
    NvBool connectedToSwitch;
    NvU64 nvswitchMemoryWindowStart;
} UvmGpuInfo;

typedef struct UvmGpuFbInfo_tag
{
    // Max physical address that can be allocated by UVM. This excludes
    // internal RM regions, which are not registered with PMA either.
    NvU64 maxAllocatableAddress;

    NvU32 heapSize;         // RAM in KB available for user allocations
    NvU32 reservedHeapSize; // RAM in KB reserved for internal RM allocation
    NvBool bZeroFb;         // Zero FB mode enabled.
} UvmGpuFbInfo;

typedef struct UvmGpuEccInfo_tag
{
    unsigned eccMask;
    unsigned eccOffset;
    void    *eccReadLocation;
    NvBool  *eccErrorNotifier;
    NvBool   bEccEnabled;
} UvmGpuEccInfo;

typedef struct UvmPmaAllocationOptions_tag
{
    NvU32 flags;
    NvU32 minimumSpeed;         // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED
    NvU64 physBegin, physEnd;   // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE
    NvU32 regionId;             // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_REGION_ID
    NvU64 alignment;            // valid if flags & UVM_PMA_ALLOCATE_FORCE_ALIGNMENT
    NvLength numPagesAllocated; // valid if flags & UVM_PMA_ALLOCATE_ALLOW_PARTIAL

    NvU32 resultFlags;          // valid if the allocation function returns NV_OK
} UvmPmaAllocationOptions;
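
//
// Illustrative sketch (not part of this interface): pairing the input flags
// defined near the top of this file with the option fields they validate. The
// address range and the use of partial allocation are example choices only.
//
//     UvmPmaAllocationOptions options = {0};
//
//     options.flags     = UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE |
//                         UVM_PMA_ALLOCATE_ALLOW_PARTIAL;
//     options.physBegin = 0;
//     options.physEnd   = (4ULL << 30) - 1;  // Example: first 4GB of FB
//
//     // After a successful allocation:
//     //   - options.numPagesAllocated is valid because ALLOW_PARTIAL was set.
//     //   - (options.resultFlags & UVM_PMA_ALLOCATE_RESULT_IS_ZERO) reports
//     //     whether the returned pages are already zeroed.
//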

//
// Mirrored in PMA (PMA_STATS)
//
typedef struct UvmPmaStatistics_tag
{
    volatile NvU64 numPages2m;                // PMA-wide 2MB page count across all regions
    volatile NvU64 numFreePages64k;           // PMA-wide free 64KB page count across all regions
    volatile NvU64 numFreePages2m;            // PMA-wide free 2MB page count across all regions
    volatile NvU64 numPages2mProtected;       // PMA-wide 2MB page count in protected memory
    volatile NvU64 numFreePages64kProtected;  // PMA-wide free 64KB page count in protected memory
    volatile NvU64 numFreePages2mProtected;   // PMA-wide free 2MB page count in protected memory
} UvmPmaStatistics;
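
//
// Illustrative sketch (not part of this interface): converting a counter into
// bytes. The counters are updated concurrently, so any read is only a
// point-in-time snapshot; whether the 64K and 2M free counts overlap is not
// specified here, so they are not summed.
//
//     static NvU64 exampleFree2mBytes(const UvmPmaStatistics *stats)
//     {
//         return stats->numFreePages2m * UVM_PAGE_SIZE_2M;
//     }
//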

/*******************************************************************************
    uvmEventSuspend
    This function will be called by the GPU driver to signal to UVM that the
    system is about to enter a sleep state.  When it is called, the
    following assumptions/guarantees are valid/made:

      * User channels have been preempted and disabled
      * UVM channels are still running normally and will continue to do
        so until after this function returns control
      * User threads are still running, but can no longer issue system
        calls to the GPU driver
      * Until exit from this function, UVM is allowed to make full use of
        the GPUs under its control, as well as of the GPU driver

    Upon return from this function, UVM may not access GPUs under its control
    until the GPU driver calls uvmEventResume().  It may still receive
    calls to uvmEventIsrTopHalf() during this time, from which it should
    return NV_ERR_NO_INTR_PENDING.  It will not receive any other calls.
*/
typedef NV_STATUS (*uvmEventSuspend_t) (void);

/*******************************************************************************
    uvmEventResume
    This function will be called by the GPU driver to signal to UVM that the
    system has exited a previously entered sleep state.  When it is called,
    the following assumptions/guarantees are valid/made:

      * UVM is again allowed to make full use of the GPUs under its
        control, as well as of the GPU driver
      * UVM channels are running normally
      * User channels are still preempted and disabled
      * User threads are again running, but still cannot issue system
        calls to the GPU driver, nor submit new work

    Upon return from this function, UVM is expected to be fully functional.
*/
typedef NV_STATUS (*uvmEventResume_t) (void);

/*******************************************************************************
    uvmEventStartDevice
    This function will be called by the GPU driver once it has finished its
    initialization to tell the UVM driver that this GPU has come up.
*/
typedef NV_STATUS (*uvmEventStartDevice_t) (const NvProcessorUuid *pGpuUuidStruct);

/*******************************************************************************
    uvmEventStopDevice
    This function will be called by the GPU driver to let UVM know that a GPU
    is going down.
*/
typedef NV_STATUS (*uvmEventStopDevice_t) (const NvProcessorUuid *pGpuUuidStruct);

/*******************************************************************************
    uvmEventIsrTopHalf_t
    This function will be called by the GPU driver to let UVM know
    that an interrupt has occurred.

    Returns:
        NV_OK if the UVM driver handled the interrupt
        NV_ERR_NO_INTR_PENDING if the interrupt is not for the UVM driver
*/
#if defined (__linux__)
typedef NV_STATUS (*uvmEventIsrTopHalf_t) (const NvProcessorUuid *pGpuUuidStruct);
#else
typedef void (*uvmEventIsrTopHalf_t) (void);
#endif

struct UvmOpsUvmEvents
{
    uvmEventSuspend_t     suspend;
    uvmEventResume_t      resume;
    uvmEventStartDevice_t startDevice;
    uvmEventStopDevice_t  stopDevice;
    uvmEventIsrTopHalf_t  isrTopHalf;
};
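
//
// Illustrative sketch (not part of this interface): populating the callback
// table with functions matching the typedefs above. The callback bodies and
// the mechanism that hands the table to the GPU driver are assumptions and
// are not defined in this header.
//
//     static NV_STATUS exampleSuspend(void) { return NV_OK; }
//     static NV_STATUS exampleResume(void)  { return NV_OK; }
//
//     static struct UvmOpsUvmEvents exampleOps =
//     {
//         .suspend = exampleSuspend,
//         .resume  = exampleResume,
//         // startDevice, stopDevice and isrTopHalf are left NULL here.
//     };
//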

#define UVM_CSL_SIGN_AUTH_TAG_SIZE_BYTES 32
#define UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES 16

typedef union UvmFaultMetadataPacket_tag
{
    struct {
        NvU8   authTag[UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES];
        NvBool valid;
    };
    // Padding to 32 bytes.
    NvU8 _padding[32];
} UvmFaultMetadataPacket;
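
//
// Illustrative sketch (not part of this interface): the union relies on the
// padding member to keep its size at exactly 32 bytes, so consuming code could
// guard that with a build-time check. Standard C11 _Static_assert is shown;
// kernel builds may use their own assertion macro instead.
//
//     _Static_assert(sizeof(UvmFaultMetadataPacket) == 32,
//                    "UvmFaultMetadataPacket must stay 32 bytes");
//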

typedef struct UvmGpuFaultInfo_tag
{
    struct
    {
        // Fault buffer GET register mapping.
        //
        // When Confidential Computing is enabled, GET refers to the shadow
        // buffer (see bufferAddress below), and not to the actual HW buffer.
        // In this setup, writes of GET (by UVM) do not result in re-evaluation
        // of any interrupt condition.
        volatile NvU32* pFaultBufferGet;

        // Fault buffer PUT register mapping.
        //
        // When Confidential Computing is enabled, PUT refers to the shadow
        // buffer (see bufferAddress below), and not to the actual HW buffer.
        // In this setup, writes of PUT (by GSP-RM) do not result in
        // re-evaluation of any interrupt condition.
        volatile NvU32* pFaultBufferPut;

        // Note: this variable is deprecated since buffer overflow is not a
        // separate register on future chips.
        volatile NvU32* pFaultBufferInfo;

        // Register mapping used to clear a replayable fault interrupt in
        // Turing+ GPUs.
        volatile NvU32* pPmcIntr;

        // Register mapping used to enable replayable fault interrupts.
        volatile NvU32* pPmcIntrEnSet;

        // Register mapping used to disable replayable fault interrupts.
        volatile NvU32* pPmcIntrEnClear;

        // Register used to enable, or disable, faults on prefetches.
        volatile NvU32* pPrefetchCtrl;

        // Replayable fault interrupt mask identifier.
        NvU32 replayableFaultMask;

        // Fault buffer CPU mapping.
        //
        // When Confidential Computing is disabled, the mapping points to the
        // actual HW fault buffer.
        //
        // When Confidential Computing is enabled, the mapping points to a
        // copy of the HW fault buffer. This "shadow buffer" is maintained
        // by GSP-RM.
        void*  bufferAddress;

        // Size, in bytes, of the fault buffer pointed to by bufferAddress.
        NvU32  bufferSize;

        // Mapping pointing to the start of the fault buffer metadata containing
        // a 16-byte authentication tag and a valid byte. Always NULL when
        // Confidential Computing is disabled.
        UvmFaultMetadataPacket *bufferMetadata;

        // Indicates whether UVM owns the replayable fault buffer.
        // The value of this field is always NV_TRUE when Confidential Computing
        // is disabled.
        NvBool bUvmOwnsHwFaultBuffer;
    } replayable;
    struct
    {
        // Shadow buffer for non-replayable faults on CPU memory. Resman copies
        // the non-replayable faults that need to be handled by UVM here.
        void* shadowBufferAddress;

        // Execution context for the queue associated with the fault buffer
        void* shadowBufferContext;

        // Fault buffer size
        NvU32  bufferSize;

        // Preallocated stack for functions called from the UVM ISR top half
        void *isr_sp;

        // Preallocated stack for functions called from the UVM ISR bottom half
        void *isr_bh_sp;

        // Used only when Hopper Confidential Compute is enabled
        // Register mappings obtained from RM
        volatile NvU32* pFaultBufferPut;

        // Used only when Hopper Confidential Compute is enabled
        // Cached get index of the non-replayable shadow buffer
        NvU32 shadowBufferGet;

        // See replayable.bufferMetadata
        UvmFaultMetadataPacket  *shadowBufferMetadata;
    } nonReplayable;
    NvHandle faultBufferHandle;
    struct Device *pDevice;
} UvmGpuFaultInfo;
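
//
// Illustrative sketch (not part of this interface): walking the pending
// replayable fault entries between GET and PUT. The entry size, the assumption
// that the GET/PUT registers hold entry indices, and the servicing helper are
// all placeholders; decryption of the shadow buffer, cache maintenance and the
// entry format itself are omitted.
//
//     const NvU32 entrySize  = EXAMPLE_FAULT_ENTRY_SIZE;  // Hypothetical
//     const NvU32 numEntries = faultInfo->replayable.bufferSize / entrySize;
//     NvU32 get = *faultInfo->replayable.pFaultBufferGet;
//     NvU32 put = *faultInfo->replayable.pFaultBufferPut;
//
//     while (get != put) {
//         NvU8 *entry = (NvU8 *)faultInfo->replayable.bufferAddress +
//                       (NvU64)get * entrySize;
//
//         exampleServiceFault(entry);                     // Hypothetical
//         get = (get + 1) % numEntries;
//     }
//
//     // Publishing the new GET tells the producer which entries were consumed.
//     *faultInfo->replayable.pFaultBufferGet = get;
//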

struct Device;

typedef struct UvmGpuPagingChannel_tag
{
    struct gpuDevice  *device;
    NvNotification    *errorNotifier;
    NvHandle          channelHandle;
    NvHandle          errorNotifierHandle;
    void              *pushStreamSp;
    struct Device     *pDevice;
} UvmGpuPagingChannel, *UvmGpuPagingChannelHandle;

typedef struct UvmGpuAccessCntrInfo_tag
{
    // Register mappings obtained from RM
    // pointer to the Get register for the access counter buffer
    volatile NvU32* pAccessCntrBufferGet;
    // pointer to the Put register for the access counter buffer
    volatile NvU32* pAccessCntrBufferPut;
    // pointer to the Full register for the access counter buffer
    volatile NvU32* pAccessCntrBufferFull;
    // pointer to the hub interrupt
    volatile NvU32* pHubIntr;
    // pointer to interrupt enable register
    volatile NvU32* pHubIntrEnSet;
    // pointer to interrupt disable register
    volatile NvU32* pHubIntrEnClear;
    // mask for the access counter buffer
    NvU32 accessCounterMask;
    // access counter buffer cpu mapping and size
    void* bufferAddress;
    NvU32  bufferSize;
    NvHandle accessCntrBufferHandle;
} UvmGpuAccessCntrInfo;

typedef enum
{
    UVM_ACCESS_COUNTER_GRANULARITY_64K = 1,
    UVM_ACCESS_COUNTER_GRANULARITY_2M  = 2,
    UVM_ACCESS_COUNTER_GRANULARITY_16M = 3,
    UVM_ACCESS_COUNTER_GRANULARITY_16G = 4,
} UVM_ACCESS_COUNTER_GRANULARITY;

typedef enum
{
    UVM_ACCESS_COUNTER_USE_LIMIT_NONE = 1,
    UVM_ACCESS_COUNTER_USE_LIMIT_QTR  = 2,
    UVM_ACCESS_COUNTER_USE_LIMIT_HALF = 3,
    UVM_ACCESS_COUNTER_USE_LIMIT_FULL = 4,
} UVM_ACCESS_COUNTER_USE_LIMIT;

typedef struct UvmGpuAccessCntrConfig_tag
{
    NvU32 mimcGranularity;

    NvU32 momcGranularity;

    NvU32 mimcUseLimit;

    NvU32 momcUseLimit;

    NvU32 threshold;
} UvmGpuAccessCntrConfig;
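
//
// Illustrative sketch (not part of this interface): filling the access counter
// configuration from the enums above. The granularity, use-limit and threshold
// values are arbitrary examples, not recommended settings.
//
//     UvmGpuAccessCntrConfig config = {0};
//
//     config.mimcGranularity = UVM_ACCESS_COUNTER_GRANULARITY_2M;
//     config.momcGranularity = UVM_ACCESS_COUNTER_GRANULARITY_64K;
//     config.mimcUseLimit    = UVM_ACCESS_COUNTER_USE_LIMIT_FULL;
//     config.momcUseLimit    = UVM_ACCESS_COUNTER_USE_LIMIT_FULL;
//     config.threshold       = 256;
//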

//
// When modifying this enum, make sure it stays compatible with the mirrored
// MEMORY_PROTECTION enum in phys_mem_allocator.h.
//
typedef enum UvmPmaGpuMemoryType_tag
{
    UVM_PMA_GPU_MEMORY_TYPE_UNPROTECTED = 0,
    UVM_PMA_GPU_MEMORY_TYPE_PROTECTED   = 1
} UVM_PMA_GPU_MEMORY_TYPE;

typedef UvmGpuChannelInfo gpuChannelInfo;
typedef UvmGpuTsgAllocParams gpuTsgAllocParams;
typedef UvmGpuChannelAllocParams gpuChannelAllocParams;
typedef UvmGpuCaps gpuCaps;
typedef UvmGpuCopyEngineCaps gpuCeCaps;
typedef UvmGpuCopyEnginesCaps gpuCesCaps;
typedef UvmGpuP2PCapsParams getP2PCapsParams;
typedef UvmGpuAddressSpaceInfo gpuAddressSpaceInfo;
typedef UvmGpuAllocInfo gpuAllocInfo;
typedef UvmGpuInfo gpuInfo;
typedef UvmGpuClientInfo gpuClientInfo;
typedef UvmGpuAccessCntrInfo gpuAccessCntrInfo;
typedef UvmGpuAccessCntrConfig gpuAccessCntrConfig;
typedef UvmGpuFaultInfo gpuFaultInfo;
typedef UvmGpuMemoryInfo gpuMemoryInfo;
typedef UvmGpuExternalMappingInfo gpuExternalMappingInfo;
typedef UvmGpuChannelResourceInfo gpuChannelResourceInfo;
typedef UvmGpuChannelInstanceInfo gpuChannelInstanceInfo;
typedef UvmGpuChannelResourceBindParams gpuChannelResourceBindParams;
typedef UvmGpuFbInfo gpuFbInfo;
typedef UvmGpuEccInfo gpuEccInfo;
typedef UvmGpuPagingChannel *gpuPagingChannelHandle;
typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo;
typedef UvmGpuPagingChannelAllocParams gpuPagingChannelAllocParams;
typedef UvmPmaAllocationOptions gpuPmaAllocationOptions;

// This struct shall not be accessed nor modified directly by UVM, as it is
// entirely managed by the RM layer.
typedef struct UvmCslContext_tag
{
    struct ccslContext_t *ctx;
    void *nvidia_stack;
} UvmCslContext;

typedef struct UvmCslIv
{
    NvU8 iv[12];
    NvU8 fresh;
} UvmCslIv;

typedef enum UvmCslDirection
{
    UVM_CSL_DIR_CPU_TO_GPU,
    UVM_CSL_DIR_GPU_TO_CPU
} UvmCslDirection;

#endif // _NV_UVM_TYPES_H_