1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 */

#include "nvkms-lut.h"
#include "nvkms-rm.h"
#include "nvkms-rmapi.h"
#include "nvkms-dma.h"
#include "nvkms-utils.h"
#include "nvos.h"

#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */

/*
 * Free a LUT surface that was allocated from video memory by
 * AllocLutSurfaceEvoInVidmem().
 *
 * Tears down, in reverse order of allocation: the GPU virtual mapping (if
 * one was successfully created), the per-subdevice CPU mappings, the
 * display context DMA, and finally the RM memory object and its handle.
 * Safe to call on a partially-constructed surface; this is also the
 * cleanup path used when AllocLutSurfaceEvoInVidmem() fails midway.
 */
static void FreeLutSurfaceEvoInVidmem(NVLutSurfaceEvoPtr pSurfEvo)
{
    NVDevEvoPtr pDevEvo;

    if (pSurfEvo == NULL) {
        return;
    }

    pDevEvo = pSurfEvo->pDevEvo;

    /*
     * gpuAddress is non-zero only if the (optional) GPU VA-space mapping
     * made at the end of AllocLutSurfaceEvoInVidmem() succeeded.
     */
    if (pSurfEvo->gpuAddress) {
        nvRmApiUnmapMemoryDma(nvEvoGlobal.clientHandle,
                              pDevEvo->deviceHandle,
                              pDevEvo->nvkmsGpuVASpace,
                              pSurfEvo->handle,
                              0,
                              (NvU64)pSurfEvo->gpuAddress);
    }

    /* Release the per-subdevice CPU mappings of the surface. */
    nvRmEvoUnMapVideoMemory(pDevEvo, pSurfEvo->handle,
                            pSurfEvo->subDeviceAddress);

    /* Free display context dmas for the surface, if any */
    nvRmEvoFreeDispContextDMA(pDevEvo, &pSurfEvo->dispCtxDma);

    /* Free the surface */
    if (pSurfEvo->handle) {
        NvU32 result;

        result = nvRmApiFree(nvEvoGlobal.clientHandle,
                             pDevEvo->deviceHandle, pSurfEvo->handle);
        if (result != NVOS_STATUS_SUCCESS) {
            nvAssert(!"Freeing LUT surface failed");
        }

        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
                           pSurfEvo->handle);
        pSurfEvo->handle = 0;
    }

    nvFree(pSurfEvo);
}

/*
 * Allocate a LUT surface in video memory.
 *
 * Allocates an NVLutSurfaceEvoRec tracking structure, an RM vidmem
 * allocation large enough for one NVEvoLutDataRec (rounded up to a
 * 64-byte multiple), a display context DMA bound to it, and CPU mappings
 * for all subdevices.  A GPU VA-space mapping is also attempted, but its
 * failure is non-fatal (see comment below).
 *
 * Returns the new surface, or NULL on failure (all partial allocations
 * are released via FreeLutSurfaceEvoInVidmem()).
 */
static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo)
{
    NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
    NvU32 ret = NVOS_STATUS_ERROR_GENERIC;
    NvU32 attr = 0, attr2 = 0;
    NvU32 allocFlags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN |
                       NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
    NvU64 size = 0, alignment = 4096;

    NVLutSurfaceEvoPtr pSurfEvo;

    pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo));
    if (pSurfEvo == NULL) {
        return NULL;
    }

    pSurfEvo->pDevEvo = pDevEvo;

    /* Round the LUT data size up to the next 64-byte multiple. */
    size = (sizeof(NVEvoLutDataRec) + 63) & ~63;

    pSurfEvo->size = size;

    pSurfEvo->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);

    if (pSurfEvo->handle == 0) {
        goto fail;
    }

    attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr);
    attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT, attr2);

    /*
     * Use at least the display engine's surface alignment requirement.
     * NOTE(review): ALIGNMENT_FORCE is already in allocFlags' initializer,
     * so the conditional OR below is redundant; kept as-is.
     */
    alignment = NV_MAX(alignment, NV_EVO_SURFACE_ALIGNMENT);
    if (alignment != 0) {
        allocFlags |= NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
    }

    memAllocParams.owner = NVKMS_RM_HEAP_ID;
    memAllocParams.type = NVOS32_TYPE_IMAGE;
    memAllocParams.size = size;
    memAllocParams.attr = attr;
    memAllocParams.attr2 = attr2;
    memAllocParams.flags = allocFlags;
    memAllocParams.alignment = alignment;

    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
                       pDevEvo->deviceHandle,
                       pSurfEvo->handle,
                       NV01_MEMORY_LOCAL_USER,
                       &memAllocParams);

    /* If we failed the allocation above, abort */
    if (ret != NVOS_STATUS_SUCCESS) {
        /*
         * Free the handle here rather than in the common failure path,
         * so FreeLutSurfaceEvoInVidmem() does not try to nvRmApiFree() a
         * memory object that was never created.
         */
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle);
        pSurfEvo->handle = 0;

        goto fail;
    }

    /* Allocate a display context dma */
    pSurfEvo->dispCtxDma =
        nvRmEvoAllocateAndBindDispContextDMA(pDevEvo,
                                             pSurfEvo->handle,
                                             NvKmsSurfaceMemoryLayoutPitch,
                                             pSurfEvo->size - 1);

    if (!pSurfEvo->dispCtxDma) {
        goto fail;
    }

    /* Map the surface for the CPU */
    if (!nvRmEvoMapVideoMemory(pSurfEvo->pDevEvo,
                               pSurfEvo->handle, pSurfEvo->size,
                               pSurfEvo->subDeviceAddress,
                               SUBDEVICE_MASK_ALL)) {
        goto fail;
    }

    /*
     * The GPU mapping is only needed for prefetching LUT surfaces for DIFR.
     * It isn't worth failing alone but we want to keep gpuAddress coherent.
     */
    ret = nvRmApiMapMemoryDma(nvEvoGlobal.clientHandle,
                              pDevEvo->deviceHandle,
                              pDevEvo->nvkmsGpuVASpace,
                              pSurfEvo->handle,
                              0,
                              pSurfEvo->size,
                              DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE) |
                              DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_ONLY),
                              &pSurfEvo->gpuAddress);

    if (ret != NVOS_STATUS_SUCCESS) {
        /* Non-fatal: a zero gpuAddress makes the free path skip the unmap. */
        pSurfEvo->gpuAddress = 0ULL;
    }

    return pSurfEvo;

fail:
    /* An error occurred -- free the surface */
    FreeLutSurfaceEvoInVidmem(pSurfEvo);

    return NULL;

}

/*
 * Free a LUT surface that was allocated from system memory by
 * AllocLutSurfaceEvoInSysmem().
 *
 * Unbinds the display context DMA, unmaps the single CPU mapping (sysmem
 * LUTs exist only on single-subdevice SOC display devices; see comment in
 * AllocLutSurfaceEvoInSysmem()), frees the RM memory object and handle,
 * and finally the tracking structure.  Safe to call on a
 * partially-constructed surface.
 */
static void FreeLutSurfaceEvoInSysmem(NVLutSurfaceEvoPtr pSurfEvo)
{
    NVDevEvoPtr pDevEvo;

    if (pSurfEvo == NULL) {
        return;
    }

    pDevEvo = pSurfEvo->pDevEvo;

    /* Free display context dmas for the surface, if any */
    nvRmEvoFreeDispContextDMA(pDevEvo, &pSurfEvo->dispCtxDma);

    /* Free the surface */
    if (pSurfEvo->handle) {
        NvU32 result;

        if (pSurfEvo->subDeviceAddress[0] != NULL) {
            /*
             * SOC display devices should only have one subdevice
             * (and therefore it is safe to unmap only subDeviceAddress[0])
             * for reasons described in AllocLutSurfaceEvoInSysmem
             */
            nvAssert(pDevEvo->numSubDevices == 1);

            result = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
                                        pDevEvo->deviceHandle,
                                        pSurfEvo->handle,
                                        pSurfEvo->subDeviceAddress[0],
                                        0);
            if (result != NVOS_STATUS_SUCCESS) {
                nvAssert(!"Unmapping LUT surface failed");
            }
            pSurfEvo->subDeviceAddress[0] = NULL;
        }

        result = nvRmApiFree(nvEvoGlobal.clientHandle,
                             pDevEvo->deviceHandle, pSurfEvo->handle);
        if (result != NVOS_STATUS_SUCCESS) {
            nvAssert(!"Freeing LUT surface failed");
        }

        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle);
    }

    nvFree(pSurfEvo);
}

/*
 * Allocate a LUT surface in system memory.
 *
 * Used instead of the vidmem path when
 * pDevEvo->requiresAllAllocationsInSysmem is set (SOC display).  Allocates
 * the tracking structure, an ISO-capable sysmem allocation (CPU-mapped by
 * nvRmAllocSysmem() itself), and a display context DMA bound to it.
 *
 * Returns the new surface, or NULL on failure (partial allocations are
 * released via FreeLutSurfaceEvoInSysmem()).
 */
static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInSysmem(NVDevEvoPtr pDevEvo)
{
    NvU32 memoryHandle = 0;
    void *pBase = NULL;
    NvU64 size = 0;

    NVLutSurfaceEvoPtr pSurfEvo;

    pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo));
    if (pSurfEvo == NULL) {
        return NULL;
    }

    pSurfEvo->pDevEvo = pDevEvo;

    /* Round the LUT data size up to the next 64-byte multiple. */
    size = (sizeof(NVEvoLutDataRec) + 63) & ~63;

    pSurfEvo->size = size;

    memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
    if (memoryHandle == 0) {
        goto fail;
    }

    /* Allocate the LUT memory from sysmem */
    if (!nvRmAllocSysmem(pDevEvo, memoryHandle, NULL, &pBase, size,
                         NVKMS_MEMORY_ISO)) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Unable to allocate LUT memory from sysmem");
        /*
         * Free the handle here: pSurfEvo->handle is still 0, so the
         * common failure path would not release it.
         */
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);

        goto fail;
    }

    pSurfEvo->handle = memoryHandle;

    /* Allocate and bind a display context dma */
    pSurfEvo->dispCtxDma =
        nvRmEvoAllocateAndBindDispContextDMA(pDevEvo,
                                             pSurfEvo->handle,
                                             NvKmsSurfaceMemoryLayoutPitch,
                                             pSurfEvo->size - 1);
    if (!pSurfEvo->dispCtxDma) {
        goto fail;
    }

    /*
     * AllocLutSurfaceEvoInSysmem() will only be called if
     * pDevEvo->requiresAllAllocationsInSysmem is TRUE. NVKMS will only set this
     * cap bit for SOC display devices, and these devices should only have one
     * subdevice.
     */
    nvAssert(pDevEvo->numSubDevices == 1);
    pSurfEvo->subDeviceAddress[0] = pBase;

    return pSurfEvo;

fail:
    /* An error occurred -- free the surface */
    FreeLutSurfaceEvoInSysmem(pSurfEvo);

    return NULL;
}

/*
 * Free a LUT surface, dispatching to the sysmem or vidmem teardown path
 * to match how AllocLutSurfaceEvo() allocated it.
 */
static void FreeLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo)
{
    NVDevEvoPtr pDevEvo;

    if (pSurfEvo == NULL) {
        return;
    }

    pDevEvo = pSurfEvo->pDevEvo;

    if (pDevEvo->requiresAllAllocationsInSysmem) {
        FreeLutSurfaceEvoInSysmem(pSurfEvo);
    } else {
        FreeLutSurfaceEvoInVidmem(pSurfEvo);
    }
}

/*
 * Allocate a LUT surface from sysmem on devices that require it (SOC
 * display), otherwise from vidmem.  Returns NULL on failure.
 */
static NVLutSurfaceEvoPtr AllocLutSurfaceEvo(NVDevEvoPtr pDevEvo)
{
    if (pDevEvo->requiresAllAllocationsInSysmem) {
        return AllocLutSurfaceEvoInSysmem(pDevEvo);
    } else {
        return AllocLutSurfaceEvoInVidmem(pDevEvo);
    }
}

/*
 * Ensure each layer of the given head's flip state has a TMO
 * (tone-mapping, presumably for HDR; the surfaces are keyed off
 * hdrStaticMetadata) LUT surface exactly when the layer has HDR static
 * metadata enabled.
 *
 * Newly allocated surfaces start with allocRefCnt 0; the caller is
 * expected to take a reference via nvRefTmoLutSurfacesEvo() when the new
 * state is applied.  Layers without HDR metadata simply have their
 * pointer cleared here (the old state's reference is dropped later via
 * nvUnrefTmoLutSurfacesEvo()).
 *
 * Returns FALSE if a needed surface could not be allocated.
 */
NvBool nvSetTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
                              NVFlipEvoHwState *pFlipState,
                              NvU32 head)
{
    NvU32 layer;
    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        if (pFlipState->layer[layer].hdrStaticMetadata.enabled) {
            if (!pFlipState->layer[layer].tmoLut.pLutSurfaceEvo) {
                pFlipState->layer[layer].tmoLut.pLutSurfaceEvo =
                    AllocLutSurfaceEvo(pDevEvo);
                if (!pFlipState->layer[layer].tmoLut.pLutSurfaceEvo) {
                    return FALSE;
                }

                // Will be referenced via nvRefTmoLutSurfacesEvo() on new state
                pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->allocRefCnt = 0;
            }
        } else {
            // Will be freed via nvUnrefTmoLutSurfacesEvo() on old state
            pFlipState->layer[layer].tmoLut.pLutSurfaceEvo = NULL;
        }
    }

    return TRUE;
}

/*
 * Take a reference on the TMO LUT surface of every layer in the new flip
 * state that has HDR static metadata enabled.  Pairs with
 * nvUnrefTmoLutSurfacesEvo() on the old state.
 */
void nvRefTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
                            NVFlipEvoHwState *pFlipState,
                            NvU32 head)
{
    // Reference new state layers that have hdrStaticMetadata enabled.
    NvU32 layer;
    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        if (pFlipState->layer[layer].hdrStaticMetadata.enabled) {
            nvAssert(pFlipState->layer[layer].tmoLut.pLutSurfaceEvo);
            pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->allocRefCnt++;
        }
    }
}

/*
 * Drop a reference on the TMO LUT surface of every layer in the old flip
 * state that had HDR static metadata enabled.  When the last reference is
 * dropped, the core channel is synced first so no in-flight LUT fetch can
 * still reference the surface, then the surface is freed.
 */
void nvUnrefTmoLutSurfacesEvo(NVDevEvoPtr pDevEvo,
                              NVFlipEvoHwState *pFlipState,
                              NvU32 head)
{
    // Unref old state layers that had hdrStaticMetadata enabled.
    NvU32 layer;
    for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) {
        if (pFlipState->layer[layer].hdrStaticMetadata.enabled) {
            nvAssert(pFlipState->layer[layer].tmoLut.pLutSurfaceEvo);

            if (pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->allocRefCnt <= 1) {
                // Wait for any outstanding LUT updates before freeing.
                if (pDevEvo->core) {
                    nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
                }

                FreeLutSurfaceEvo(
                    pFlipState->layer[layer].tmoLut.pLutSurfaceEvo);

                pFlipState->layer[layer].tmoLut.pLutSurfaceEvo = NULL;
            } else {
                pFlipState->layer[layer].tmoLut.pLutSurfaceEvo->allocRefCnt--;
            }
        }
    }
}

/*
 * Allocate the device's per-api-head LUT surfaces (one per entry of
 * pDevEvo->lut.apiHead[].LUT[]), reset the per-disp LUT state, and, if
 * the hal requires one, allocate and initialize the default LUT surface.
 *
 * Returns FALSE on any allocation failure; everything allocated so far is
 * released via nvFreeLutSurfacesEvo().
 */
NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo)
{
    NVDispEvoPtr pDispEvo;
    NvU32 apiHead, dispIndex, i, sd;

    for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
        for (i = 0; i < ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT); i++) {
            pDevEvo->lut.apiHead[apiHead].LUT[i] = AllocLutSurfaceEvo(pDevEvo);

            if (pDevEvo->lut.apiHead[apiHead].LUT[i] == NULL) {
                nvFreeLutSurfacesEvo(pDevEvo);
                return FALSE;
            }
        }

        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
            // No palette has been loaded yet, so disable the LUT.
            pDevEvo->lut.apiHead[apiHead].disp[dispIndex].waitForPreviousUpdate = FALSE;
            pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curBaseLutEnabled = FALSE;
            pDevEvo->lut.apiHead[apiHead].disp[dispIndex].curOutputLutEnabled = FALSE;
        }
    }

    if (pDevEvo->hal->caps.needDefaultLutSurface) {
        pDevEvo->lut.defaultLut = AllocLutSurfaceEvo(pDevEvo);
        if (pDevEvo->lut.defaultLut == NULL) {
            nvFreeLutSurfacesEvo(pDevEvo);
            return FALSE;
        }

        /* Mark the default LUT as not yet programmed on any subdevice. */
        for (sd = 0; sd < NVKMS_MAX_SUBDEVICES; sd++) {
            pDevEvo->lut.defaultBaseLUTState[sd] =
                pDevEvo->lut.defaultOutputLUTState[sd] =
                NvKmsLUTStateUninitialized;
        }

        pDevEvo->hal->InitDefaultLut(pDevEvo);
    }

    return TRUE;
}

/*
 * Free every LUT surface owned by the device: cancels pending LUT update
 * timers, syncs the core channel so the hardware no longer references any
 * surface, clears the per-head current-LUT state, then frees the default
 * LUT and all per-api-head LUT surfaces.
 *
 * Also used as the error-cleanup path of nvAllocLutSurfacesEvo(), so it
 * tolerates NULL entries.
 */
void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo)
{
    NvU32 head, i, dispIndex, apiHead;
    NVDispEvoPtr pDispEvo;

    /* Cancel any queued LUT update timers */
    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
            nvCancelLutUpdateEvo(pDispEvo, apiHead);
        }
    }

    /* wait for any outstanding LUT updates before freeing the surface */
    if (pDevEvo->core) {
        nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
    }

    /* Clear the current lut surface stored in the hardware head state */
    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        for (head = 0; head < pDevEvo->numHeads; head++) {
            pDispEvo->headState[head].lut.pCurrSurface = NULL;
            pDispEvo->headState[head].lut.baseLutEnabled = FALSE;
            pDispEvo->headState[head].lut.outputLutEnabled = FALSE;
        }
    }

    if (pDevEvo->lut.defaultLut != NULL) {
        FreeLutSurfaceEvo(pDevEvo->lut.defaultLut);
        pDevEvo->lut.defaultLut = NULL;
    }

    for (apiHead = 0; apiHead < pDevEvo->numApiHeads; apiHead++) {
        for (i = 0; i < ARRAY_LEN(pDevEvo->lut.apiHead[apiHead].LUT); i++) {
            if (pDevEvo->lut.apiHead[apiHead].LUT[i] != NULL) {
                FreeLutSurfaceEvo(pDevEvo->lut.apiHead[apiHead].LUT[i]);
                pDevEvo->lut.apiHead[apiHead].LUT[i] = NULL;
            }
        }
    }
}

/*
 * Copy a full NVEvoLutDataRec of LUT data into the given surface's CPU
 * mapping for pDispEvo's display-owner subdevice.
 *
 * The copy is done dword-by-dword through the CPU mapping (which may be
 * write-combined vidmem), hence the 4-byte alignment asserts on source,
 * destination, and size.
 */
void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo,
                                 const NVEvoLutDataRec *pLUTBuffer,
                                 NVDispEvoPtr pDispEvo)
{
    const NvU32* data = (const NvU32*)pLUTBuffer;
    size_t size = sizeof(*pLUTBuffer);
    const int sd = pDispEvo->displayOwner;
    NvU32 *dst;
    const NvU32 *src;
    int dword;

    if (pSurfEvo == NULL) {
        nvAssert(pSurfEvo);
        return;
    }

    nvAssert(pSurfEvo->subDeviceAddress[sd]);

    /* The size to copy should not be larger than the surface. */
    nvAssert(size <= pSurfEvo->size);

    /* The source, destination, and size should be 4-byte aligned. */
    nvAssert((((NvUPtr)data) & 0x3) == 0);
    nvAssert((((NvUPtr)pSurfEvo->subDeviceAddress[sd]) & 0x3) == 0);
    nvAssert((size % 4) == 0);

    src = data;
    dst = (NvU32*)pSurfEvo->subDeviceAddress[sd];

    for (dword = 0; dword < (size/4); dword++) {
        *(dst++) = *(src++);
    }
}