/*******************************************************************************
    Copyright (c) 2020-2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

        The above copyright notice and this permission notice shall be
        included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

*******************************************************************************/

// For Hopper, the UVM page tree 'depth' maps to hardware as follows:
//
// UVM depth   HW level                            VA bits
// 0           PDE4                                56:56
// 1           PDE3                                55:47
// 2           PDE2                                46:38
// 3           PDE1 (or 512M PTE)                  37:29
// 4           PDE0 (dual 64k/4k PDE, or 2M PTE)   28:21
// 5           PTE_64K / PTE_4K                    20:16 / 20:12
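//
// As a sanity check on this table, the index widths sum to the full 57-bit
// VA: with 4K pages, 1 + 9 + 9 + 9 + 8 + 9 index bits plus a 12-bit page
// offset; with 64K pages, 1 + 9 + 9 + 9 + 8 + 5 index bits plus a 16-bit
// page offset (see index_bits_hopper() and num_va_bits_hopper() below).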

#include "uvm_types.h"
#include "uvm_global.h"
#include "uvm_hal.h"
#include "uvm_hal_types.h"
#include "uvm_hopper_fault_buffer.h"
#include "hwref/hopper/gh100/dev_fault.h"
#include "hwref/hopper/gh100/dev_mmu.h"

#define MMU_BIG 0
#define MMU_SMALL 1

uvm_mmu_engine_type_t uvm_hal_hopper_mmu_engine_id_to_type(NvU16 mmu_engine_id)
{
    if (mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_HOST0 && mmu_engine_id <= NV_PFAULT_MMU_ENG_ID_HOST44)
        return UVM_MMU_ENGINE_TYPE_HOST;

    if (mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_CE0 && mmu_engine_id <= NV_PFAULT_MMU_ENG_ID_CE9)
        return UVM_MMU_ENGINE_TYPE_CE;

    // We shouldn't be servicing faults from any other engines
    UVM_ASSERT_MSG(mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_GRAPHICS, "Unexpected engine ID: 0x%x\n", mmu_engine_id);

    return UVM_MMU_ENGINE_TYPE_GRAPHICS;
}

static NvU32 page_table_depth_hopper(NvU32 page_size)
{
    // The common case is page_size == UVM_PAGE_SIZE_2M, hence the first check
    if (page_size == UVM_PAGE_SIZE_2M)
        return 4;
    else if (page_size == UVM_PAGE_SIZE_512M)
        return 3;
    return 5;
}

static NvU32 entries_per_index_hopper(NvU32 depth)
{
    UVM_ASSERT(depth < 6);

    // Depth 4 is PDE0, a dual 64K/4K entry (see the table at the top of this
    // file)
    if (depth == 4)
        return 2;
    return 1;
}

static NvLength entry_offset_hopper(NvU32 depth, NvU32 page_size)
{
    UVM_ASSERT(depth < 6);

    // The 4K half of the dual PDE0 occupies the second 8-byte slot
    if ((page_size == UVM_PAGE_SIZE_4K) && (depth == 4))
        return MMU_SMALL;
    return MMU_BIG;
}

static NvLength entry_size_hopper(NvU32 depth)
{
    // Each individual entry is 8 bytes; the dual PDE0 entry is 16
    return entries_per_index_hopper(depth) * 8;
}

static NvU32 index_bits_hopper(NvU32 depth, NvU32 page_size)
{
    static const NvU32 bit_widths[] = {1, 9, 9, 9, 8};

    // Some code paths keep querying this until they get 0, meaning only the
    // page offset remains.
    UVM_ASSERT(depth < 6);
    if (depth < 5) {
        return bit_widths[depth];
    }
    else if (depth == 5) {
        switch (page_size) {
            case UVM_PAGE_SIZE_4K:
                return 9;
            case UVM_PAGE_SIZE_64K:
                return 5;
            default:
                break;
        }
    }
    return 0;
}
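
// Worked example (illustrative only, not used by the driver): at depth 3
// (PDE1) the index covers VA bits 37:29, so a software walker would compute
//
//     index = (va >> 29) & ((1ULL << index_bits_hopper(3, page_size)) - 1);
//
// i.e., a 9-bit index, regardless of the leaf page size.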

static NvU32 num_va_bits_hopper(void)
{
    return 57;
}

static NvLength allocation_size_hopper(NvU32 depth, NvU32 page_size)
{
    UVM_ASSERT(depth < 6);
    if (depth == 5 && page_size == UVM_PAGE_SIZE_64K)
        return 256;

    // Depth 0 requires only a 16-byte allocation (2 entries of 8 bytes), but
    // it must be 4k aligned.
    return 4096;
}

// PTE Permission Control Flags
static NvU64 pte_pcf(uvm_prot_t prot, NvU64 flags)
{
    bool ac = !(flags & UVM_MMU_PTE_FLAGS_ACCESS_COUNTERS_DISABLED);
    bool cached = flags & UVM_MMU_PTE_FLAGS_CACHED;

    UVM_ASSERT(prot != UVM_PROT_NONE);
    UVM_ASSERT((flags & ~UVM_MMU_PTE_FLAGS_MASK) == 0);

    if (ac) {
        switch (prot) {
            case UVM_PROT_READ_ONLY:
                return cached ? NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACE :
                                NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACE;
            case UVM_PROT_READ_WRITE:
                return cached ? NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACE :
                                NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACE;
            case UVM_PROT_READ_WRITE_ATOMIC:
                return cached ? NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE :
                                NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACE;
            default:
                break;
        }
    }
    else {
        switch (prot) {
            case UVM_PROT_READ_ONLY:
                return cached ? NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACD :
                                NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACD;
            case UVM_PROT_READ_WRITE:
                return cached ? NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACD :
                                NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACD;
            case UVM_PROT_READ_WRITE_ATOMIC:
                return cached ? NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD :
                                NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD;
            default:
                break;
        }
    }

    // Unsupported PCF
    UVM_ASSERT_MSG(0, "Unsupported PTE PCF: prot: %s, ac: %d, cached: %d\n", uvm_prot_string(prot), ac, cached);

    return NV_MMU_VER3_PTE_PCF_INVALID;
}
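
// Example trace of the function above (illustrative only): a cached mapping
// with atomics allowed and access counters left enabled,
//
//     pte_pcf(UVM_PROT_READ_WRITE_ATOMIC, UVM_MMU_PTE_FLAGS_CACHED)
//
// returns NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE. The ACE/ACD
// suffixes are assumed to denote access counting enabled/disabled, matching
// the 'ac' flag above.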

static NvU64 make_pte_hopper(uvm_aperture_t aperture, NvU64 address, uvm_prot_t prot, NvU64 flags)
{
    NvU8 aperture_bits = 0;
    NvU64 pte_bits = 0;

    // valid 0:0
    pte_bits |= HWCONST64(_MMU_VER3, PTE, VALID, TRUE);

    // aperture 2:1
    if (aperture == UVM_APERTURE_SYS)
        aperture_bits = NV_MMU_VER3_PTE_APERTURE_SYSTEM_COHERENT_MEMORY;
    else if (aperture == UVM_APERTURE_VID)
        aperture_bits = NV_MMU_VER3_PTE_APERTURE_VIDEO_MEMORY;
    else if (aperture >= UVM_APERTURE_PEER_0 && aperture <= UVM_APERTURE_PEER_7)
        aperture_bits = NV_MMU_VER3_PTE_APERTURE_PEER_MEMORY;
    else
        UVM_ASSERT_MSG(0, "Invalid aperture: %d\n", aperture);

    pte_bits |= HWVALUE64(_MMU_VER3, PTE, APERTURE, aperture_bits);

    // PCF (permission control flags) 7:3
    pte_bits |= HWVALUE64(_MMU_VER3, PTE, PCF, pte_pcf(prot, flags));

    // kind 11:8
    pte_bits |= HWVALUE64(_MMU_VER3, PTE, KIND, NV_MMU_PTE_KIND_GENERIC_MEMORY);

    address >>= NV_MMU_VER3_PTE_ADDRESS_SHIFT;

    if (aperture == UVM_APERTURE_VID) {
        // vid address 39:12
        pte_bits |= HWVALUE64(_MMU_VER3, PTE, ADDRESS_VID, address);
    }
    else {
        // sys/peer address 51:12
        pte_bits |= HWVALUE64(_MMU_VER3, PTE, ADDRESS, address);

        // peer id 63:61
        if (aperture >= UVM_APERTURE_PEER_0 && aperture <= UVM_APERTURE_PEER_7)
            pte_bits |= HWVALUE64(_MMU_VER3, PTE, PEER_ID, UVM_APERTURE_PEER_ID(aperture));
    }

    return pte_bits;
}
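
// Sketch of the resulting layout (illustrative values only): a vidmem PTE
// for physical address 0x2000000 with UVM_PROT_READ_ONLY and no flags sets
// VALID = 1, APERTURE = video memory, the read-only ACE PCF from pte_pcf(),
// KIND = GENERIC_MEMORY and ADDRESS_VID = 0x2000000 >>
// NV_MMU_VER3_PTE_ADDRESS_SHIFT.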

static NvU64 make_sked_reflected_pte_hopper(void)
{
    return HWCONST64(_MMU_VER3, PTE, VALID, TRUE) |
           HWVALUE64(_MMU_VER3, PTE, PCF, pte_pcf(UVM_PROT_READ_WRITE_ATOMIC, UVM_MMU_PTE_FLAGS_NONE)) |
           HWVALUE64(_MMU_VER3, PTE, KIND, NV_MMU_PTE_KIND_SMSKED_MESSAGE);
}

static NvU64 make_sparse_pte_hopper(void)
{
    return HWCONST64(_MMU_VER3, PTE, VALID, FALSE) |
           HWCONST64(_MMU_VER3, PTE, PCF, SPARSE);
}

static NvU64 unmapped_pte_hopper(NvU32 page_size)
{
    // Setting PCF to NO_VALID_4KB_PAGE on an otherwise-zeroed big PTE causes
    // the corresponding 4k PTEs to be ignored. This allows the invalidation of
    // a mixed PDE range to be much faster.
    if (page_size != UVM_PAGE_SIZE_64K)
        return 0;

    // When VALID == 0, the GMMU still reads the PCF field, which indicates
    // whether the PTE is sparse (see make_sparse_pte_hopper) or an unmapped
    // big-page PTE.
    return HWCONST64(_MMU_VER3, PTE, VALID, FALSE) |
           HWCONST64(_MMU_VER3, PTE, PCF, NO_VALID_4KB_PAGE);
}
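
// To summarize the invalid encodings used above: a sparse PTE is VALID == 0
// with PCF == SPARSE, an unmapped big (64K) PTE is VALID == 0 with
// PCF == NO_VALID_4KB_PAGE, and an unmapped 4K PTE is all zeros.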

static NvU64 poisoned_pte_hopper(void)
{
    // An invalid PTE won't be fatal from faultable units like SM, which is the
    // most likely source of bad PTE accesses.

    // Engines with priv accesses won't fault on the priv PTE, so add a backup
    // mechanism using an impossible memory address. MMU will trigger an
    // interrupt when it detects a bad physical address, i.e., a physical
    // address > GPU memory size.
    //
    // This address has to fit within 38 bits (max address width of vidmem) and
    // be aligned to page_size.
    NvU64 phys_addr = 0x2bad000000ULL;

    NvU64 pte_bits = make_pte_hopper(UVM_APERTURE_VID, phys_addr, UVM_PROT_READ_ONLY, UVM_MMU_PTE_FLAGS_NONE);
    return WRITE_HWCONST64(pte_bits, _MMU_VER3, PTE, PCF, PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACD);
}

static NvU64 single_pde_hopper(uvm_mmu_page_table_alloc_t *phys_alloc, NvU32 depth)
{
    NvU64 pde_bits = 0;

    if (phys_alloc != NULL) {
        NvU64 address = phys_alloc->addr.address >> NV_MMU_VER3_PDE_ADDRESS_SHIFT;
        pde_bits |= HWCONST64(_MMU_VER3, PDE, IS_PTE, FALSE);

        switch (phys_alloc->addr.aperture) {
            case UVM_APERTURE_SYS:
                pde_bits |= HWCONST64(_MMU_VER3, PDE, APERTURE, SYSTEM_COHERENT_MEMORY);
                break;
            case UVM_APERTURE_VID:
                pde_bits |= HWCONST64(_MMU_VER3, PDE, APERTURE, VIDEO_MEMORY);
                break;
            default:
                UVM_ASSERT_MSG(0, "Invalid aperture: %d\n", phys_alloc->addr.aperture);
                break;
        }

        // PCF (permission control flags) 5:3
        // Hopper GPUs on ATS-enabled systems perform a parallel lookup on both
        // the ATS and GMMU page tables. For managed memory we need to prevent
        // this parallel lookup, since we would not get any GPU fault if the
        // CPU has a valid mapping. Also, for external ranges that are known to
        // be mapped entirely on the GMMU page table, we can skip the ATS
        // lookup for performance reasons. Parallel ATS lookup is disabled at
        // PDE1 (depth 3), so the setting applies to the underlying 512MB VA
        // range.
        //
        // UVM sets ATS_NOT_ALLOWED for all Hopper+ mappings on ATS systems.
        // This is fine because CUDA ensures that all managed and external
        // allocations are properly compartmentalized in 512MB-aligned VA
        // regions. For cudaHostRegister CUDA cannot control the VA range, but
        // we rely on ATS for those allocations, so they cannot use the
        // ATS_NOT_ALLOWED mode.
        //
        // TODO: Bug 3254055: Relax the NO_ATS setting from the 512MB (PDE1)
        // range to individual PTEs.
        if (depth == 3 && g_uvm_global.ats.enabled)
            pde_bits |= HWCONST64(_MMU_VER3, PDE, PCF, VALID_UNCACHED_ATS_NOT_ALLOWED);
        else
            pde_bits |= HWCONST64(_MMU_VER3, PDE, PCF, VALID_UNCACHED_ATS_ALLOWED);

        // address 51:12
        pde_bits |= HWVALUE64(_MMU_VER3, PDE, ADDRESS, address);
    }

    return pde_bits;
}

static NvU64 big_half_pde_hopper(uvm_mmu_page_table_alloc_t *phys_alloc)
{
    NvU64 pde_bits = 0;

    if (phys_alloc != NULL) {
        NvU64 address = phys_alloc->addr.address >> NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG_SHIFT;

        switch (phys_alloc->addr.aperture) {
            case UVM_APERTURE_SYS:
                pde_bits |= HWCONST64(_MMU_VER3, DUAL_PDE, APERTURE_BIG, SYSTEM_COHERENT_MEMORY);
                break;
            case UVM_APERTURE_VID:
                pde_bits |= HWCONST64(_MMU_VER3, DUAL_PDE, APERTURE_BIG, VIDEO_MEMORY);
                break;
            default:
                UVM_ASSERT_MSG(0, "Invalid big aperture %d\n", phys_alloc->addr.aperture);
                break;
        }

        // PCF (permission control flags) 5:3
        pde_bits |= HWCONST64(_MMU_VER3, DUAL_PDE, PCF_BIG, VALID_UNCACHED_ATS_NOT_ALLOWED);

        // address 51:8
        pde_bits |= HWVALUE64(_MMU_VER3, DUAL_PDE, ADDRESS_BIG, address);
    }

    return pde_bits;
}

static NvU64 small_half_pde_hopper(uvm_mmu_page_table_alloc_t *phys_alloc)
{
    NvU64 pde_bits = 0;

    if (phys_alloc != NULL) {
        NvU64 address = phys_alloc->addr.address >> NV_MMU_VER3_DUAL_PDE_ADDRESS_SHIFT;

        switch (phys_alloc->addr.aperture) {
            case UVM_APERTURE_SYS:
                pde_bits |= HWCONST64(_MMU_VER3, DUAL_PDE, APERTURE_SMALL, SYSTEM_COHERENT_MEMORY);
                break;
            case UVM_APERTURE_VID:
                pde_bits |= HWCONST64(_MMU_VER3, DUAL_PDE, APERTURE_SMALL, VIDEO_MEMORY);
                break;
            default:
                UVM_ASSERT_MSG(0, "Invalid small aperture %d\n", phys_alloc->addr.aperture);
                break;
        }

        // PCF (permission control flags) 69:67 [5:3]
        pde_bits |= HWCONST64(_MMU_VER3, DUAL_PDE, PCF_SMALL, VALID_UNCACHED_ATS_NOT_ALLOWED);

        // address 115:76 [51:12]
        pde_bits |= HWVALUE64(_MMU_VER3, DUAL_PDE, ADDRESS_SMALL, address);
    }

    return pde_bits;
}

static void make_pde_hopper(void *entry,
                            uvm_mmu_page_table_alloc_t **phys_allocs,
                            NvU32 depth,
                            uvm_page_directory_t *child_dir)
{
    NvU32 entry_count = entries_per_index_hopper(depth);
    NvU64 *entry_bits = (NvU64 *)entry;

    if (entry_count == 1) {
        *entry_bits = single_pde_hopper(*phys_allocs, depth);
    }
    else if (entry_count == 2) {
        entry_bits[MMU_BIG] = big_half_pde_hopper(phys_allocs[MMU_BIG]);
        entry_bits[MMU_SMALL] = small_half_pde_hopper(phys_allocs[MMU_SMALL]);

        // This field applies to the whole dual PDE but is stored in the lower
        // bits.
        entry_bits[MMU_BIG] |= HWCONST64(_MMU_VER3, DUAL_PDE, IS_PTE, FALSE);
    }
    else {
        UVM_ASSERT_MSG(0, "Invalid number of entries per index: %d\n", entry_count);
    }
}
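
// Note on the dual-PDE layout built above: PDE0 entries are 16 bytes (see
// entry_size_hopper()), with the big-page half in the low 64 bits and the
// small-page half in the high 64 bits. This is why the small-half comments
// give both absolute bit positions (e.g. 115:76) and the positions within
// that half ([51:12]).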

static uvm_mmu_mode_hal_t hopper_mmu_mode_hal;

uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_hopper(NvU32 big_page_size)
{
    static bool initialized = false;

    UVM_ASSERT(big_page_size == UVM_PAGE_SIZE_64K || big_page_size == UVM_PAGE_SIZE_128K);

    // TODO: Bug 1789555: RM should reject the creation of GPU VA spaces with
    // a 128K big page size for Pascal+ GPUs
    if (big_page_size == UVM_PAGE_SIZE_128K)
        return NULL;

    if (!initialized) {
        uvm_mmu_mode_hal_t *ampere_mmu_mode_hal = uvm_hal_mmu_mode_ampere(big_page_size);
        UVM_ASSERT(ampere_mmu_mode_hal);

        // We assume that arch_hal->mmu_mode_hal() is called under the global
        // lock the first time, so check that here.
        uvm_assert_mutex_locked(&g_uvm_global.global_lock);

        // Inherit the Ampere HAL and override the Hopper-specific entry points
        hopper_mmu_mode_hal = *ampere_mmu_mode_hal;
        hopper_mmu_mode_hal.entry_size = entry_size_hopper;
        hopper_mmu_mode_hal.index_bits = index_bits_hopper;
        hopper_mmu_mode_hal.entries_per_index = entries_per_index_hopper;
        hopper_mmu_mode_hal.entry_offset = entry_offset_hopper;
        hopper_mmu_mode_hal.num_va_bits = num_va_bits_hopper;
        hopper_mmu_mode_hal.allocation_size = allocation_size_hopper;
        hopper_mmu_mode_hal.page_table_depth = page_table_depth_hopper;
        hopper_mmu_mode_hal.make_pte = make_pte_hopper;
        hopper_mmu_mode_hal.make_sked_reflected_pte = make_sked_reflected_pte_hopper;
        hopper_mmu_mode_hal.make_sparse_pte = make_sparse_pte_hopper;
        hopper_mmu_mode_hal.unmapped_pte = unmapped_pte_hopper;
        hopper_mmu_mode_hal.poisoned_pte = poisoned_pte_hopper;
        hopper_mmu_mode_hal.make_pde = make_pde_hopper;

        initialized = true;
    }

    return &hopper_mmu_mode_hal;
}
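
// Illustrative usage (hypothetical caller, for exposition only):
//
//     uvm_mmu_mode_hal_t *hal = uvm_hal_mmu_mode_hopper(UVM_PAGE_SIZE_64K);
//
//     UVM_ASSERT(hal->page_table_depth(UVM_PAGE_SIZE_2M) == 4);
//     UVM_ASSERT(hal->entries_per_index(4) == 2);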

NvU16 uvm_hal_hopper_mmu_client_id_to_utlb_id(NvU16 client_id)
{
    // RAST, GCC and GPCCS share the GPC-level RGG uTLB; the T1, PE and TPCCS
    // clients map to per-TPC LTP uTLBs.
    switch (client_id) {
        case NV_PFAULT_CLIENT_GPC_RAST:
        case NV_PFAULT_CLIENT_GPC_GCC:
        case NV_PFAULT_CLIENT_GPC_GPCCS:
            return UVM_HOPPER_GPC_UTLB_ID_RGG;
        case NV_PFAULT_CLIENT_GPC_T1_0:
            return UVM_HOPPER_GPC_UTLB_ID_LTP0;
        case NV_PFAULT_CLIENT_GPC_T1_1:
        case NV_PFAULT_CLIENT_GPC_PE_0:
        case NV_PFAULT_CLIENT_GPC_TPCCS_0:
            return UVM_HOPPER_GPC_UTLB_ID_LTP1;
        case NV_PFAULT_CLIENT_GPC_T1_2:
            return UVM_HOPPER_GPC_UTLB_ID_LTP2;
        case NV_PFAULT_CLIENT_GPC_T1_3:
        case NV_PFAULT_CLIENT_GPC_PE_1:
        case NV_PFAULT_CLIENT_GPC_TPCCS_1:
            return UVM_HOPPER_GPC_UTLB_ID_LTP3;
        case NV_PFAULT_CLIENT_GPC_T1_4:
            return UVM_HOPPER_GPC_UTLB_ID_LTP4;
        case NV_PFAULT_CLIENT_GPC_T1_5:
        case NV_PFAULT_CLIENT_GPC_PE_2:
        case NV_PFAULT_CLIENT_GPC_TPCCS_2:
            return UVM_HOPPER_GPC_UTLB_ID_LTP5;
        case NV_PFAULT_CLIENT_GPC_T1_6:
            return UVM_HOPPER_GPC_UTLB_ID_LTP6;
        case NV_PFAULT_CLIENT_GPC_T1_7:
        case NV_PFAULT_CLIENT_GPC_PE_3:
        case NV_PFAULT_CLIENT_GPC_TPCCS_3:
            return UVM_HOPPER_GPC_UTLB_ID_LTP7;
        case NV_PFAULT_CLIENT_GPC_T1_8:
            return UVM_HOPPER_GPC_UTLB_ID_LTP8;
        case NV_PFAULT_CLIENT_GPC_T1_9:
        case NV_PFAULT_CLIENT_GPC_PE_4:
        case NV_PFAULT_CLIENT_GPC_TPCCS_4:
            return UVM_HOPPER_GPC_UTLB_ID_LTP9;
        case NV_PFAULT_CLIENT_GPC_T1_10:
            return UVM_HOPPER_GPC_UTLB_ID_LTP10;
        case NV_PFAULT_CLIENT_GPC_T1_11:
        case NV_PFAULT_CLIENT_GPC_PE_5:
        case NV_PFAULT_CLIENT_GPC_TPCCS_5:
            return UVM_HOPPER_GPC_UTLB_ID_LTP11;
        case NV_PFAULT_CLIENT_GPC_T1_12:
            return UVM_HOPPER_GPC_UTLB_ID_LTP12;
        case NV_PFAULT_CLIENT_GPC_T1_13:
        case NV_PFAULT_CLIENT_GPC_PE_6:
        case NV_PFAULT_CLIENT_GPC_TPCCS_6:
            return UVM_HOPPER_GPC_UTLB_ID_LTP13;
        case NV_PFAULT_CLIENT_GPC_T1_14:
            return UVM_HOPPER_GPC_UTLB_ID_LTP14;
        case NV_PFAULT_CLIENT_GPC_T1_15:
        case NV_PFAULT_CLIENT_GPC_PE_7:
        case NV_PFAULT_CLIENT_GPC_TPCCS_7:
            return UVM_HOPPER_GPC_UTLB_ID_LTP15;
        case NV_PFAULT_CLIENT_GPC_T1_16:
            return UVM_HOPPER_GPC_UTLB_ID_LTP16;
        case NV_PFAULT_CLIENT_GPC_T1_17:
        case NV_PFAULT_CLIENT_GPC_PE_8:
        case NV_PFAULT_CLIENT_GPC_TPCCS_8:
            return UVM_HOPPER_GPC_UTLB_ID_LTP17;

        default:
            UVM_ASSERT_MSG(false, "Invalid client value: 0x%x\n", client_id);
    }

    return 0;
}