
Searched refs:cache (Results 1 – 4 of 4) sorted by relevance

/open-nvidia-gpu/src/nvidia-modeset/src/
nvkms-surface.c
928 struct ClearSurfaceUsageCache cache = { }; in nvEvoFreeClientSurfaces() local
941 ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache); in nvEvoFreeClientSurfaces()
948 ClearSurfaceUsageApply(pDevEvo, &cache, FALSE); in nvEvoFreeClientSurfaces()
996 struct ClearSurfaceUsageCache cache = { }; in nvEvoUnregisterSurface() local
998 ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache); in nvEvoUnregisterSurface()
999 ClearSurfaceUsageApply(pDevEvo, &cache, skipUpdate); in nvEvoUnregisterSurface()
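
The nvkms-surface.c hits follow a collect-then-apply pattern: a zero-initialized ClearSurfaceUsageCache local accumulates per-surface clears via ClearSurfaceUsageCollect(), and a single ClearSurfaceUsageApply() call flushes them. Below is a minimal, self-contained sketch of that pattern; the UsageCache/Surface types and the usage_collect()/usage_apply() helpers are hypothetical stand-ins, not the actual NVKMS definitions.

/*
 * Sketch of the collect/apply pattern suggested by the hits above: a
 * zero-initialized usage cache local to the caller is filled per surface,
 * then applied in one pass.  All names here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_HEADS 4

typedef struct {
    bool in_use;    /* surface currently in use on some head? */
    int  head;      /* which head uses it */
} Surface;

typedef struct {
    bool clear_head[MAX_HEADS];   /* heads whose usage must be cleared */
} UsageCache;

/* Record (but do not yet perform) the clears this surface requires. */
static void usage_collect(const Surface *s, UsageCache *cache)
{
    if (s->in_use && s->head >= 0 && s->head < MAX_HEADS)
        cache->clear_head[s->head] = true;
}

/* Apply all collected clears in a single pass. */
static void usage_apply(const UsageCache *cache, bool skip_update)
{
    int head;

    for (head = 0; head < MAX_HEADS; head++) {
        if (!cache->clear_head[head])
            continue;
        printf("clearing usage on head %d%s\n",
               head, skip_update ? " (update deferred)" : "");
    }
}

int main(void)
{
    Surface surfaces[] = {
        { .in_use = true,  .head = 0 },
        { .in_use = false, .head = 1 },
        { .in_use = true,  .head = 2 },
    };
    UsageCache cache = { 0 };   /* zero-initialized, as in the hits above */
    size_t i;

    for (i = 0; i < sizeof(surfaces) / sizeof(surfaces[0]); i++)
        usage_collect(&surfaces[i], &cache);

    usage_apply(&cache, false);
    return 0;
}
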
nvkms-evo.c
3591 } cache[NV_MAX_DEVICES][NVKMS_MAX_HEADS_PER_DISP]; in UpdateEvoLockState() local
3593 nvkms_memset(cache, 0, sizeof(cache)); in UpdateEvoLockState()
3776 cache[GpuIndex(pDevEvo, sd)][head].disableServer = TRUE; in UpdateEvoLockState()
3783 cache[GpuIndex(pDevEvo, sd)][head].disableClient = TRUE; in UpdateEvoLockState()
3801 cache[GpuIndex(pDevEvo, sd)][head].enableServer = TRUE; in UpdateEvoLockState()
3808 cache[GpuIndex(pDevEvo, sd)][head].enableClient = TRUE; in UpdateEvoLockState()
3934 if (cache[GpuIndex(pDevEvo, sd)][head].disableClient) { in UpdateEvoLockState()
3941 if (cache[GpuIndex(pDevEvo, sd)][head].disableServer) { in UpdateEvoLockState()
3948 if (cache[GpuIndex(pDevEvo, sd)][head].enableServer) { in UpdateEvoLockState()
3955 if (cache[GpuIndex(pDevEvo, sd)][head].enableClient) { in UpdateEvoLockState()
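
The nvkms-evo.c hits show UpdateEvoLockState() using a stack-local, zeroed [device][head] array of flags that is filled in one pass and acted on in a second. A rough sketch of that two-pass flag cache follows; MAX_DEVICES, MAX_HEADS, the placeholder policy, and plain memset() (the driver uses nvkms_memset()) are assumptions, while the cache[sd][head].{disable,enable}{Server,Client} shape comes from the hits above.

/*
 * Sketch of a two-pass flag cache: pass 1 records the desired lock-state
 * changes per (device, head), pass 2 carries them out.  The policy used to
 * set the flags here is a placeholder, not the driver's logic.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_DEVICES 2
#define MAX_HEADS   4

static void update_lock_state(void)
{
    struct {
        bool disableServer, disableClient;
        bool enableServer, enableClient;
    } cache[MAX_DEVICES][MAX_HEADS];
    int sd, head;

    memset(cache, 0, sizeof(cache));   /* nvkms_memset() in the driver */

    /* Pass 1: decide what each (device, head) pair needs. */
    for (sd = 0; sd < MAX_DEVICES; sd++) {
        for (head = 0; head < MAX_HEADS; head++) {
            /* Placeholder policy: enable the client lock everywhere. */
            cache[sd][head].enableClient = true;
        }
    }

    /* Pass 2: act on the cached decisions. */
    for (sd = 0; sd < MAX_DEVICES; sd++) {
        for (head = 0; head < MAX_HEADS; head++) {
            if (cache[sd][head].disableClient)
                printf("sd %d head %d: disable client lock\n", sd, head);
            if (cache[sd][head].disableServer)
                printf("sd %d head %d: disable server lock\n", sd, head);
            if (cache[sd][head].enableServer)
                printf("sd %d head %d: enable server lock\n", sd, head);
            if (cache[sd][head].enableClient)
                printf("sd %d head %d: enable client lock\n", sd, head);
        }
    }
}

int main(void)
{
    update_lock_state();
    return 0;
}
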
/open-nvidia-gpu/kernel-open/nvidia-uvm/
uvm_pmm_gpu.c
192 struct kmem_cache *cache; member
239 #define CHUNK_CACHE chunk_split_cache[0].cache
2343 UVM_ASSERT(chunk_split_cache[cache_idx].cache != NULL); in split_gpu_chunk()
3023 UVM_ASSERT(chunk_split_cache[subchunk_count_log2].cache); in deinit_chunk_split_cache()
3037 if (!chunk_split_cache[level].cache) { in init_chunk_split_cache_level()
3051 chunk_split_cache[level].cache = in init_chunk_split_cache_level()
3055 if (!chunk_split_cache[level].cache) in init_chunk_split_cache_level()
3112 if (!g_pma_address_batch_cache_ref.cache) { in init_pma_address_batch_cache()
3118 g_pma_address_batch_cache_ref.cache = in init_pma_address_batch_cache()
3122 if (!g_pma_address_batch_cache_ref.cache) in init_pma_address_batch_cache()
[all …]
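
The uvm_pmm_gpu.c hits point at a table of lazily created slab caches, one per chunk-split level, where a NULL .cache pointer appears to mean the level has not been initialized yet. The kernel-module sketch below illustrates such a lazy per-level kmem_cache table; the level count, object sizes, cache names, and init/deinit flow are illustrative guesses, not the actual UVM implementation.

/*
 * Lazily initialized per-level kmem_cache table, in the spirit of the
 * chunk_split_cache hits above.  Builds as a standalone kernel module; all
 * sizes and names are demo values.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>

#define NUM_SPLIT_LEVELS 10

static struct {
    struct kmem_cache *cache;   /* NULL until the level is first needed */
} chunk_split_cache[NUM_SPLIT_LEVELS];

/* Name storage kept at file scope so it outlives the caches. */
static char cache_names[NUM_SPLIT_LEVELS][32];

static int init_chunk_split_cache_level(int level)
{
    if (chunk_split_cache[level].cache)
        return 0;                                /* already created */

    snprintf(cache_names[level], sizeof(cache_names[level]),
             "demo_chunk_split_%d", level);

    /* Demo object size grows with the number of subchunks at this level. */
    chunk_split_cache[level].cache =
        kmem_cache_create(cache_names[level],
                          (1u << level) * sizeof(void *), 0, 0, NULL);

    if (!chunk_split_cache[level].cache)
        return -ENOMEM;
    return 0;
}

static void deinit_chunk_split_cache(void)
{
    int level;

    for (level = 0; level < NUM_SPLIT_LEVELS; level++) {
        if (chunk_split_cache[level].cache) {
            kmem_cache_destroy(chunk_split_cache[level].cache);
            chunk_split_cache[level].cache = NULL;
        }
    }
}

static int __init demo_init(void)
{
    /* Create only the levels actually needed; here, levels 1 and 4. */
    int ret = init_chunk_split_cache_level(1);

    if (!ret)
        ret = init_chunk_split_cache_level(4);
    if (ret)
        deinit_chunk_split_cache();
    return ret;
}

static void __exit demo_exit(void)
{
    deinit_chunk_split_cache();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
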
/open-nvidia-gpu/kernel-open/common/inc/
nv-linux.h
1729 struct kmem_cache *cache; in nv_kmem_cache_create() local
1757 cache = kmem_cache_create(name_unique, size, align, 0, nv_kmem_ctor_dummy); in nv_kmem_cache_create()
1761 return cache; in nv_kmem_cache_create()
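
The nv-linux.h hit is inside nv_kmem_cache_create(), a header-level wrapper that calls kmem_cache_create() with a uniquified name and a dummy constructor. The sketch below shows a comparable wrapper; the demo_ names are hypothetical, the unique-name generation is omitted, and only the kmem_cache_create(name, size, align, 0, ctor) call shape is taken from the hit.

/*
 * Header-style sketch of a kmem_cache_create() wrapper with a dummy ctor.
 * A non-NULL constructor makes the slab allocator treat the cache as
 * unmergeable, so it keeps its own identity in /proc/slabinfo.
 */
#include <linux/kernel.h>
#include <linux/slab.h>

static void demo_kmem_ctor_dummy(void *obj)
{
    (void)obj;   /* no per-object construction needed */
}

static inline struct kmem_cache *demo_kmem_cache_create(const char *name,
                                                        size_t size,
                                                        size_t align)
{
    /*
     * The real wrapper additionally builds a unique name (name_unique in the
     * hit above) before calling kmem_cache_create(); that step is omitted
     * from this sketch.
     */
    return kmem_cache_create(name, size, align, 0, demo_kmem_ctor_dummy);
}
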