1 /* 2 * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 3 * SPDX-License-Identifier: MIT 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 */ 23 24 #include "core/core.h" 25 #include "gpu/gpu.h" 26 #include "gpu/ce/kernel_ce.h" 27 #include "gpu/mem_mgr/mem_mgr.h" 28 #include "gpu/mem_mgr/heap.h" 29 #include "kernel/gpu/mig_mgr/kernel_mig_manager.h" 30 #include "gpu/bus/kern_bus.h" 31 #include "kernel/gpu/fifo/kernel_fifo.h" 32 #include "objtmr.h" 33 #include "gpu/mem_mgr/mem_desc.h" 34 #include "kernel/gpu/intr/intr.h" 35 36 #include "gpu/mem_mgr/channel_utils.h" 37 #include "gpu/mem_mgr/mem_scrub.h" 38 #include "os/os.h" 39 #include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h" 40 #include "gpu/mem_mgr/mem_mgr.h" 41 #include "utils/nvprintf.h" 42 #include "utils/nvassert.h" 43 #include "nvgputypes.h" 44 #include "nvtypes.h" 45 #include "nvstatus.h" 46 #include "rmapi/rs_utils.h" 47 #include "core/locks.h" 48 #include "class/cl0050.h" 49 #include "class/clb0b5.h" // MAXWELL_DMA_COPY_A 50 #include "class/clc0b5.h" // PASCAL_DMA_COPY_A 51 #include "class/clc1b5.h" // PASCAL_DMA_COPY_B 52 #include "class/clc3b5.h" // VOLTA_DMA_COPY_A 53 #include "class/clc5b5.h" // TURING_DMA_COPY_A 54 #include "class/clc6b5.h" // AMPERE_DMA_COPY_A 55 #include "class/clc7b5.h" // AMPERE_DMA_COPY_B 56 57 #include "class/clc8b5.h" // HOPPER_DMA_COPY_A 58 59 #include "class/clc86f.h" // HOPPER_CHANNEL_GPFIFO_A 60 61 static NvU64 _scrubCheckProgress(OBJMEMSCRUB *pScrubber); 62 static NvU64 _searchScrubList(OBJMEMSCRUB *pScrubber, RmPhysAddr base, NvU64 size); 63 static void _waitForPayload(OBJMEMSCRUB *pScrubber, RmPhysAddr base, RmPhysAddr end); 64 static void _scrubAddWorkToList(OBJMEMSCRUB *pScrubber, RmPhysAddr base, NvU64 size, NvU64 newId); 65 static NvU32 _scrubMemory(OBJMEMSCRUB *pScrubber, RmPhysAddr base, NvU64 size, 66 NvU32 dstCpuCacheAttrib, NvU32 freeToken); 67 static void _scrubWaitAndSave(OBJMEMSCRUB *pScrubber, PSCRUB_NODE pList, NvLength itemsToSave); 68 static NvU64 _scrubGetFreeEntries(OBJMEMSCRUB *pScrubber); 69 static NvU64 _scrubCheckAndSubmit(OBJMEMSCRUB *pScrubber, NvU64 chunkSize, NvU64 
*pPages, 70 NvU64 pageCount, PSCRUB_NODE pList, NvLength pagesToScrubCheck); 71 static void _scrubCopyListItems(OBJMEMSCRUB *pScrubber, PSCRUB_NODE pList, NvLength itemsToSave); 72 73 static NV_STATUS _scrubCheckLocked(OBJMEMSCRUB *pScrubber, PSCRUB_NODE *ppList, NvU64 *pSize); 74 75 /** 76 * Constructs the memory scrubber object and signals 77 * RM to create CE channels for submitting scrubbing work 78 * 79 * @param[in] pGpu OBJGPU pointer 80 * @param[in] pHeap Heap object pointer 81 * 82 * @returns NV_STATUS on success. 83 * error, if something fails 84 */ 85 NV_STATUS 86 scrubberConstruct 87 ( 88 OBJGPU *pGpu, 89 Heap *pHeap 90 ) 91 { 92 OBJMEMSCRUB *pScrubber; 93 MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); 94 KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); 95 NV_STATUS status = NV_OK; 96 NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); 97 PMA *pPma = NULL; 98 KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance = NULL; 99 100 if (pHeap == NULL) 101 { 102 return NV_ERR_INVALID_ARGUMENT; 103 } 104 pPma = &pHeap->pmaObject; 105 106 if (pPma->pScrubObj != NULL) 107 return NV_OK; 108 109 pScrubber = (OBJMEMSCRUB *)portMemAllocNonPaged(sizeof(OBJMEMSCRUB)); 110 if (pScrubber == NULL) 111 { 112 return NV_ERR_INSUFFICIENT_RESOURCES; 113 } 114 115 portMemSet(pScrubber, 0, sizeof(OBJMEMSCRUB)); 116 117 pScrubber->pScrubberMutex = (PORT_MUTEX *)portMemAllocNonPaged(portSyncMutexSize); 118 if (pScrubber->pScrubberMutex == NULL) 119 { 120 status = NV_ERR_INSUFFICIENT_RESOURCES; 121 goto error; 122 } 123 124 NV_ASSERT_OK_OR_GOTO(status, 125 portSyncMutexInitialize(pScrubber->pScrubberMutex), freemutex); 126 127 pScrubber->pScrubList = (PSCRUB_NODE) 128 portMemAllocNonPaged(sizeof(SCRUB_NODE) * MAX_SCRUB_ITEMS); 129 if (pScrubber->pScrubList == NULL) 130 { 131 status = NV_ERR_INSUFFICIENT_RESOURCES; 132 goto deinitmutex; 133 } 134 portMemSet(pScrubber->pScrubList, 0, sizeof(SCRUB_NODE) * MAX_SCRUB_ITEMS); 135 136 pScrubber->pGpu = pGpu; 137 138 { 
139 NV_PRINTF(LEVEL_INFO, "Starting to init CeUtils for scrubber.\n"); 140 NV0050_ALLOCATION_PARAMETERS ceUtilsAllocParams = {0}; 141 142 if (memmgrUseVasForCeMemoryOps(pMemoryManager)) 143 { 144 ceUtilsAllocParams.flags = DRF_DEF(0050, _CEUTILS_FLAGS, _VIRTUAL_MODE, _TRUE); 145 } 146 147 if (bMIGInUse) 148 { 149 KERNEL_MIG_GPU_INSTANCE *pCurrKernelMIGGPUInstance; 150 151 FOR_EACH_VALID_GPU_INSTANCE(pGpu, pKernelMIGManager, pCurrKernelMIGGPUInstance) 152 { 153 if (pCurrKernelMIGGPUInstance->pMemoryPartitionHeap == pHeap) 154 { 155 pKernelMIGGPUInstance = pCurrKernelMIGGPUInstance; 156 break; 157 } 158 } 159 FOR_EACH_VALID_GPU_INSTANCE_END(); 160 } 161 162 NV_ASSERT_OK_OR_GOTO(status, objCreate(&pScrubber->pCeUtils, pHeap, CeUtils, pGpu, pKernelMIGGPUInstance, &ceUtilsAllocParams), destroyscrublist); 163 NV_ASSERT_OK_OR_GOTO(status, pmaRegMemScrub(pPma, pScrubber), destroyscrublist); 164 } 165 166 return status; 167 168 destroyscrublist: 169 portMemFree(pScrubber->pScrubList); 170 171 deinitmutex: 172 portSyncMutexDestroy(pScrubber->pScrubberMutex); 173 174 freemutex: 175 portMemFree(pScrubber->pScrubberMutex); 176 pScrubber->pScrubberMutex = NULL; 177 178 error: 179 portMemFree(pScrubber); 180 return status; 181 } 182 183 static NvBool 184 _isScrubWorkPending( 185 OBJMEMSCRUB *pScrubber 186 ) 187 { 188 NvBool workPending = NV_FALSE; 189 190 if (pScrubber->bVgpuScrubberEnabled) 191 { 192 if (pScrubber->lastSubmittedWorkId != pScrubber->vgpuScrubBuffRing.pScrubBuffRingHeader->lastSWSemaphoreDone) 193 workPending = NV_TRUE; 194 } 195 else 196 { 197 if (pScrubber->lastSubmittedWorkId != ceutilsUpdateProgress(pScrubber->pCeUtils)) 198 workPending = NV_TRUE; 199 } 200 return workPending; 201 } 202 203 /** 204 * Destructs the scrubber 205 * 1. De-registers the scrubber from the PMA object 206 * 2. 
Free the scrubber list and scrubber lock
 *
 * @param[in]  pGpu       OBJGPU pointer
 * @param[in]  pHeap      Heap object pointer
 * @param[in]  pScrubber  OBJMEMSCRUB pointer
 *
 */
void
scrubberDestruct
(
    OBJGPU      *pGpu,
    Heap        *pHeap,
    OBJMEMSCRUB *pScrubber
)
{
    PMA         *pPma          = NULL;
    PSCRUB_NODE  pPmaScrubList = NULL;
    NvU64        count         = 0;
    NV_STATUS    status        = NV_OK;

    if (pHeap == NULL)
    {
        return;
    }
    pPma = &pHeap->pmaObject;

    if (pScrubber == NULL)
        return;

    // De-register from PMA first so no new scrub work is submitted while the
    // scrubber is being torn down.
    pmaUnregMemScrub(pPma);
    portSyncMutexAcquire(pScrubber->pScrubberMutex);

    // Drain outstanding scrub work, unless the GPU is in reset (in which case
    // the pending work cannot be expected to complete).
    if (!API_GPU_IN_RESET_SANITY_CHECK(pGpu))
    {
        RMTIMEOUT timeout;
        gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);

        while (_isScrubWorkPending(pScrubber))
        {
            // just wait till it finishes
            // Since the default RM Timeout is violated by this, added this for FMODEL
            if (!IS_FMODEL(pGpu))
            {
                if (gpuCheckTimeout(pGpu, &timeout) == NV_ERR_TIMEOUT)
                {
                    NV_PRINTF(LEVEL_FATAL,
                              " Timed out when waiting for the scrub to complete the pending work .\n");
                    DBG_BREAKPOINT();
                    break;
                }
            }
        }
    }

    // check for the completed scrub work items
    NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, _scrubCheckLocked(pScrubber, &pPmaScrubList, &count));

    // Make sure all scrubbed pages are returned to PMA
    if (count > 0)
        pmaClearScrubbedPages(pPma, pPmaScrubList, count);

    portMemFree(pPmaScrubList);

    portMemFree(pScrubber->pScrubList);
    {
        objDelete(pScrubber->pCeUtils);
    }

    // Release, destroy, then free the mutex storage, and finally the scrubber.
    portSyncMutexRelease(pScrubber->pScrubberMutex);
    portSyncMutexDestroy(pScrubber->pScrubberMutex);
    portMemFree(pScrubber->pScrubberMutex);
    portMemFree(pScrubber);
}

//
// Collects completed scrub work items into a newly allocated list (*ppList)
// and reports its length in *pSize; advances lastSeenIdByClient via
// _scrubCopyListItems. Callers hold pScrubberMutex (see scrubCheck and
// scrubberDestruct) and are responsible for freeing *ppList.
//
static NV_STATUS
_scrubCheckLocked
(
    OBJMEMSCRUB  *pScrubber,
    PSCRUB_NODE  *ppList,
    NvU64        *pSize
)
{
    NV_STATUS   status             = NV_OK;
    PSCRUB_NODE pList              = NULL;
    NvLength    itemsToSave        = 0;
    NvU64       currentCompletedId = 0;

    *ppList = NULL;
    *pSize  = 0;
    currentCompletedId = _scrubCheckProgress(pScrubber);

    // Completed-but-unreported items are those between the client's last seen
    // id and the current completed id.
    itemsToSave = (NvLength)(currentCompletedId - pScrubber->lastSeenIdByClient);

    NV_ASSERT(itemsToSave <= MAX_SCRUB_ITEMS);

    if(itemsToSave == 0)
        goto exit;

    pList = (PSCRUB_NODE)portMemAllocNonPaged(itemsToSave * sizeof(SCRUB_NODE));
    if (pList == NULL)
    {
        status = NV_ERR_INSUFFICIENT_RESOURCES;
        goto exit;
    }
    portMemSet(pList, 0, sizeof(SCRUB_NODE) * itemsToSave);

    _scrubCopyListItems(pScrubber, pList, itemsToSave);

exit:
    *ppList = pList;
    *pSize  = itemsToSave;
    return status;
}

/**
 * This function checks for the completed scrub work items,
 * and populates the SCRUB_NODE in the array.
 *
 * @param[in]   pScrubber  OBJMEMSCRUB pointer
 * @param[out]  ppList     SCRUB_NODE double pointer
 * @param[out]  pSize      NvU64 pointer
 *
 * @returns NV_OK on success,
 *          NV_ERR_INSUFFICIENT_RESOURCES when the list allocation fails.
 */

NV_STATUS
scrubCheck
(
    OBJMEMSCRUB  *pScrubber,
    PSCRUB_NODE  *ppList,
    NvU64        *pSize
)
{
    NV_STATUS status;
    portSyncMutexAcquire(pScrubber->pScrubberMutex);
    status = _scrubCheckLocked(pScrubber, ppList, pSize);
    portSyncMutexRelease(pScrubber->pScrubberMutex);
    return status;
}

/**
 * This function submits work to the memory scrubber.
 * This function interface is changed to return a list of scrubbed pages to the
 * client, since the scrubber work list resources are limited, if the submission
 * page count is more than scrubber list resources the completed scrubbed pages
 * are saved in the list and the submission progresses.
351 * 352 * @param[in] pScrubber OBJMEMSCRUB pointer 353 * @param[in] chunkSize NvU64 size of each page 354 * @param[in] pPages NvU64 array of base address 355 * @param[in] pageCount NvU64 number of pages 356 * @param[out] ppList SCRUB_NODE double pointer to hand off the list 357 * @param[out] pSize NvU64 pointer to store the size 358 * 359 * @returns NV_OK on success, NV_ERR_GENERIC on HW Failure 360 */ 361 NV_STATUS 362 scrubSubmitPages 363 ( 364 OBJMEMSCRUB *pScrubber, 365 NvU64 chunkSize, 366 NvU64 *pPages, 367 NvU64 pageCount, 368 PSCRUB_NODE *ppList, 369 NvU64 *pSize 370 ) 371 { 372 NvU64 curPagesSaved = 0; 373 PSCRUB_NODE pScrubList = NULL; 374 NvLength pagesToScrubCheck = 0; 375 NvU64 totalSubmitted = 0; 376 NvU64 numFinished = 0; 377 NvU64 freeEntriesInList = 0; 378 NvU64 scrubCount = 0; 379 NvU64 numPagesToScrub = pageCount; 380 NV_STATUS status = NV_OK; 381 382 portSyncMutexAcquire(pScrubber->pScrubberMutex); 383 *pSize = 0; 384 *ppList = pScrubList; 385 386 NV_PRINTF(LEVEL_INFO, "submitting pages, pageCount:%llx\n", pageCount); 387 388 freeEntriesInList = _scrubGetFreeEntries(pScrubber); 389 if (freeEntriesInList < pageCount) 390 { 391 pScrubList = (PSCRUB_NODE) 392 portMemAllocNonPaged((NvLength)(sizeof(SCRUB_NODE) * (pageCount - freeEntriesInList))); 393 394 if (pScrubList == NULL) 395 { 396 status = NV_ERR_NO_MEMORY; 397 goto cleanup; 398 } 399 400 while (freeEntriesInList < pageCount) 401 { 402 if (pageCount > MAX_SCRUB_ITEMS) 403 { 404 pagesToScrubCheck = (NvLength)(MAX_SCRUB_ITEMS - freeEntriesInList); 405 scrubCount = MAX_SCRUB_ITEMS; 406 } 407 else 408 { 409 pagesToScrubCheck = (NvLength)(pageCount - freeEntriesInList); 410 scrubCount = pageCount; 411 } 412 413 numFinished = _scrubCheckAndSubmit(pScrubber, chunkSize, &pPages[totalSubmitted], 414 scrubCount, &pScrubList[curPagesSaved], 415 pagesToScrubCheck); 416 417 pageCount -= numFinished; 418 curPagesSaved += pagesToScrubCheck; 419 totalSubmitted += numFinished; 420 freeEntriesInList = 
_scrubGetFreeEntries(pScrubber); 421 } 422 423 *ppList = pScrubList; 424 *pSize = curPagesSaved; 425 } 426 else 427 { 428 totalSubmitted = _scrubCheckAndSubmit(pScrubber, chunkSize, pPages, 429 pageCount, NULL, 430 0); 431 *ppList = NULL; 432 *pSize = 0; 433 } 434 435 cleanup: 436 portSyncMutexRelease(pScrubber->pScrubberMutex); 437 438 NV_CHECK_OK_OR_RETURN(LEVEL_INFO, status); 439 440 if (totalSubmitted == numPagesToScrub) 441 { 442 status = NV_OK; 443 } 444 else 445 { 446 NV_PRINTF(LEVEL_FATAL, "totalSubmitted :%llx != pageCount: %llx\n", 447 totalSubmitted, pageCount); 448 DBG_BREAKPOINT(); 449 status = NV_ERR_GENERIC; 450 } 451 452 return status; 453 } 454 455 /** 456 * This function waits for the memory scrubber to wait for the scrubbing of 457 * pages within the range [pagesStart, pagesEnd] for the for the array of pages 458 * of size pageCount 459 * 460 * @param[in] pScrubber OBJMEMSCRUB pointer 461 * @param[in] chunkSize NvU64 size of each page 462 * @param[in] pPages NvU64 pointer to store the base address 463 * @param[in] pageCount NvU64 number of pages in the array 464 * 465 * @returns NV_OK 466 */ 467 468 NV_STATUS 469 scrubWaitPages 470 ( 471 OBJMEMSCRUB *pScrubber, 472 NvU64 chunkSize, 473 NvU64 *pPages, 474 NvU32 pageCount 475 ) 476 { 477 478 NvU32 iter = 0; 479 NV_STATUS status = NV_OK; 480 481 portSyncMutexAcquire(pScrubber->pScrubberMutex); 482 for (iter = 0; iter < pageCount; iter++) 483 { 484 _waitForPayload(pScrubber, pPages[iter], (pPages[iter] + chunkSize - 1)); 485 } 486 portSyncMutexRelease(pScrubber->pScrubberMutex); 487 return status; 488 489 } 490 491 /** 492 * This function waits for the scrubber to finish scrubbing enough items 493 * to have numPages fully scrubbed and then saves the work items to the list 494 * passed to the client. 
 *
 * @param[in]   pScrubber  OBJMEMSCRUB pointer
 * @param[in]   numPages   the number of pages we should wait to be scrubbed
 * @param[in]   pageSize   the page size
 * @param[out]  ppList     SCRUB_NODE double pointer to return the saved list pointer
 * @param[out]  pSize      NvU64 pointer to return the size of saved work.
 *
 * @returns NV_OK if at least one work is pending in the scrubber list
 *          NV_ERR_NO_MEMORY when no work is pending in the scrubber list
 */

NV_STATUS
scrubCheckAndWaitForSize
(
    OBJMEMSCRUB  *pScrubber,
    NvU64         numPages,
    NvU64         pageSize,
    PSCRUB_NODE  *ppList,
    NvU64        *pSize
)
{
    PSCRUB_NODE pList      = NULL;
    NV_STATUS   status     = NV_OK;
    NvLength    totalItems = 0;
    portSyncMutexAcquire(pScrubber->pScrubberMutex);
    totalItems = (NvLength)pScrubber->scrubListSize;
    *pSize  = 0;
    *ppList = pList;

    NvLength startIdx = pScrubber->lastSeenIdByClient;
    NvU64 totalScrubbedPages = 0;
    NvLength requiredItemsToSave = 0;

    //
    // Walk pending entries oldest-first, counting how many work items are
    // needed to cover numPages pages (each item contributes size/pageSize
    // pages). NOTE(review): loop runs while totalScrubbedPages <= numPages,
    // i.e. it stops once strictly more than numPages pages are covered or
    // the list is exhausted — confirm this off-by-one is intended.
    //
    for (; requiredItemsToSave < totalItems && totalScrubbedPages <= numPages; requiredItemsToSave++) {
        totalScrubbedPages += (pScrubber->pScrubList[(startIdx + requiredItemsToSave) % MAX_SCRUB_ITEMS].size / pageSize);
    }

    if (requiredItemsToSave != 0) {
        pList = (PSCRUB_NODE) portMemAllocNonPaged(sizeof(SCRUB_NODE) * requiredItemsToSave);
        if (pList == NULL)
        {
            status = NV_ERR_INSUFFICIENT_RESOURCES;
            goto exit;
        }

        // Block until the selected items complete, then copy them into pList.
        _scrubWaitAndSave(pScrubber, pList, requiredItemsToSave);
    }
    else {
        // since there is no scrub work remaining, it is up to the caller to decide how to handle that.
        status = NV_ERR_NO_MEMORY;
    }

    *pSize  = (NvU64)requiredItemsToSave;
    *ppList = pList;

exit:
    portSyncMutexRelease(pScrubber->pScrubberMutex);
    return status;
}

/**
 * helper function to copy elements from scrub list to the temporary list to
 * return to the caller.
 *
 * @param[in]  pScrubber    OBJMEMSCRUB pointer
 * @param[in]  pList        SCRUB_NODE pointer to copy the elements into
 * @param[in]  itemsToSave  NvLength number of elements to copy
 *
 */

static void
_scrubCopyListItems
(
    OBJMEMSCRUB *pScrubber,
    PSCRUB_NODE  pList,
    NvLength     itemsToSave
)
{
    NvLength startIdx = pScrubber->lastSeenIdByClient%MAX_SCRUB_ITEMS;
    NvLength endIdx   = (pScrubber->lastSeenIdByClient + itemsToSave)%
                        MAX_SCRUB_ITEMS;

    NV_ASSERT(pList != NULL);
    NV_ASSERT(itemsToSave <= MAX_SCRUB_ITEMS);

    if (startIdx < endIdx)
    {
        // Contiguous range in the circular list: single copy, then clear
        // the source entries so their ids read as 0 (free).
        portMemCopy(pList,
                    sizeof(SCRUB_NODE) * itemsToSave,
                    &pScrubber->pScrubList[startIdx],
                    sizeof(SCRUB_NODE) * itemsToSave);
        portMemSet(&pScrubber->pScrubList[startIdx], 0, sizeof(SCRUB_NODE) * itemsToSave);
    }
    else
    {
        // Range wraps around the end of the circular list: copy (and clear)
        // in two pieces.
        NvLength itemsFromStartToLastItem = (NvLength)(MAX_SCRUB_ITEMS - startIdx);

        // copy from startIdx to (MAX_SCRUB_ITEMS -1) idx
        portMemCopy(pList,
                    sizeof(SCRUB_NODE) * itemsFromStartToLastItem,
                    &pScrubber->pScrubList[startIdx],
                    sizeof(SCRUB_NODE) * itemsFromStartToLastItem);
        portMemSet(&pScrubber->pScrubList[startIdx], 0, sizeof(SCRUB_NODE) * itemsFromStartToLastItem);

        // now copy from 0 to endIdx
        portMemCopy(&pList[itemsFromStartToLastItem],
                    sizeof(SCRUB_NODE) * endIdx,
                    &pScrubber->pScrubList[0],
                    sizeof(SCRUB_NODE) * endIdx);

        portMemSet(&pScrubber->pScrubList[0], 0, sizeof(SCRUB_NODE) * endIdx);
    }

    // Hand-off complete: advance the client's cursor and shrink the list.
    pScrubber->lastSeenIdByClient += itemsToSave;
    pScrubber->scrubListSize      -= itemsToSave;
    NV_ASSERT(_scrubGetFreeEntries(pScrubber) <= MAX_SCRUB_ITEMS);
}

/* This function is used to check and submit work items always within the
 * available / maximum scrub list size.
 *
 * @param[in]  pScrubber          OBJMEMSCRUB pointer
 * @param[in]  chunkSize          size of each page
 * @param[in]  pPages             Array of base address
 * @param[in]  pageCount          number of pages in the array
 * @param[in]  pList              pointer will store the return check array
 * @returns the number of work successfully submitted, else 0
 */
static NvU64
_scrubCheckAndSubmit
(
    OBJMEMSCRUB *pScrubber,
    NvU64        chunkSize,
    NvU64       *pPages,
    NvU64        pageCount,
    PSCRUB_NODE  pList,
    NvLength     pagesToScrubCheck
)
{
    NvU64     iter = 0;
    NvU64     newId;
    NV_STATUS status;

    if (pList == NULL && pagesToScrubCheck != 0)
    {
        NV_PRINTF(LEVEL_ERROR,
                  "pages need to be saved off, but stash list is invalid\n");
        goto exit;
    }

    // First wait for (and stash) pagesToScrubCheck completed items so the
    // work list has room for the new submissions.
    _scrubWaitAndSave(pScrubber, pList, pagesToScrubCheck);

    for (iter = 0; iter < pageCount; iter++)
    {
        // Work ids increase monotonically; the next id is last + 1.
        newId = pScrubber->lastSubmittedWorkId + 1;

        NV_PRINTF(LEVEL_INFO,
                  "Submitting work, Id: %llx, base: %llx, size: %llx\n",
                  newId, pPages[iter], chunkSize);

        {
            status =_scrubMemory(pScrubber, pPages[iter], chunkSize, NV_MEMORY_DEFAULT,
                                 (NvU32)newId);
        }

        if(status != NV_OK)
        {
            NV_PRINTF(LEVEL_ERROR, "Failing because the work dint submit.\n");
            goto exit;
        }
        _scrubAddWorkToList(pScrubber, pPages[iter], chunkSize, newId);
        _scrubCheckProgress(pScrubber);
    }

    return iter;
exit:
    // NOTE(review): on failure 0 is returned even if some earlier iterations
    // were already submitted and added to the list.
    return 0;

}

/**
 * helper function to return the free space in scrub list
 */
static NvU64
_scrubGetFreeEntries
(
    OBJMEMSCRUB *pScrubber
)
{
    return MAX_SCRUB_ITEMS - pScrubber->scrubListSize;
}

/**
 * helper function to return the max semaphore id that we need to wait for
 * array of scrub works
 *
 * @returns 0, if no entry in list matched the base & end
 */
static NvU64
_searchScrubList
(
    OBJMEMSCRUB  *pScrubber,
    RmPhysAddr    base,
    RmPhysAddr    end
)
{
    NvU64      tempLastSeenIdByClient = pScrubber->lastSeenIdByClient;
    NvU64      lastSubmittedWorkId    = pScrubber->lastSubmittedWorkId;
    NvU64      id                     = 0;
    NvU64      maxId                  = 0;
    RmPhysAddr blockStart             = 0;
    RmPhysAddr blockEnd               = 0;

    //
    // we need not check for lastSubmittedWorkId, since lastSubmittedWorkId is always one more than
    // the lastSubmittedWorkIdx.
    //
    while (tempLastSeenIdByClient != lastSubmittedWorkId)
    {
        blockStart = pScrubber->pScrubList[tempLastSeenIdByClient%MAX_SCRUB_ITEMS].base;
        blockEnd   = pScrubber->pScrubList[tempLastSeenIdByClient%MAX_SCRUB_ITEMS].base +
                     pScrubber->pScrubList[tempLastSeenIdByClient%MAX_SCRUB_ITEMS].size - 1;

        // Check whether the page ranges overlap
        if ( !(blockStart > end || blockEnd < base) )
        {
            // Track the highest overlapping id; waiting for it covers all
            // earlier overlapping submissions too.
            id    = pScrubber->pScrubList[tempLastSeenIdByClient%MAX_SCRUB_ITEMS].id;
            maxId = (id > maxId) ? id : maxId;
        }
        tempLastSeenIdByClient++;
    }
    return maxId;
}


/**
 * helper function which waits for a particular submission to complete and
 * copies the completed work items from scrub list to temporary list
 *
 */

static void
_scrubWaitAndSave
(
    OBJMEMSCRUB *pScrubber,
    PSCRUB_NODE  pList,
    NvLength     itemsToSave
)
{
    NvU64 currentCompletedId = 0;

    if (itemsToSave == 0)
        return;

    currentCompletedId = _scrubCheckProgress(pScrubber);

    // Poll (servicing CE interrupts each iteration) until the oldest
    // itemsToSave entries have completed.
    while (currentCompletedId < (pScrubber->lastSeenIdByClient + itemsToSave))
    {
        {
            ceutilsServiceInterrupts(pScrubber->pCeUtils);
        }
        currentCompletedId = _scrubCheckProgress(pScrubber);
    }

    _scrubCopyListItems(pScrubber, pList, itemsToSave);
}


/**
 * helper function to find and wait for a specific work to complete
 */
static void
_waitForPayload
(
    OBJMEMSCRUB  *pScrubber,
    RmPhysAddr    base,
    RmPhysAddr    end
)
{
    NvU64  idToWait;

    // We need to look up in the range between [lastSeenIdByClient, lastSubmittedWorkId]
    idToWait = _searchScrubList(pScrubber, base, end);

    if (idToWait == 0)
    {
        // No pending work overlaps [base, end]; nothing to wait for.
        return;
    }

    // Loop will break out, when the semaphore is equal to payload
    while (_scrubCheckProgress(pScrubber) < idToWait)
    {
        portUtilSpin();
    }
}

/**
 * helper function to add a work to the scrub list
 */
static void
_scrubAddWorkToList
(
    OBJMEMSCRUB  *pScrubber,
    RmPhysAddr    base,
    NvU64         size,
    NvU64         newId
)
{
    // since the Id works from [1,4k] range, the Idx in which it writes is 1 lesser
    NvU32 idx = (newId-1) % MAX_SCRUB_ITEMS;

    /*
     * since this function is called after making sure that there is space
     * available in the list, no check is needed
     */
    NV_ASSERT(pScrubber->pScrubList[idx].id == 0);
    pScrubber->pScrubList[idx].base = base;
    pScrubber->pScrubList[idx].size = size;
    pScrubber->pScrubList[idx].id   = newId;

    pScrubber->lastSubmittedWorkId = newId;
    pScrubber->scrubListSize++;
    NV_ASSERT(_scrubGetFreeEntries(pScrubber) <= MAX_SCRUB_ITEMS);
}



/**
 * Scrubber uses 64 bit index to track the work submitted. But HW supports
 * only 32 bit semaphore. The current completed Id is calculated here, based
 * on the lastSeenIdByClient and current HW semaphore value.
 *
 * @returns Current Completed 64 bit ID
 */
static NvU64
_scrubCheckProgress
(
    OBJMEMSCRUB *pScrubber
)
{
    NvU32  hwCurrentCompletedId;
    NvU64  lastSWSemaphoreDone;

    NV_ASSERT(pScrubber != NULL);

    if (pScrubber->bVgpuScrubberEnabled)
    {
        // Reconstruct the 64-bit id from the 32-bit HW semaphore plus the
        // upper bits cached in lastSWSemaphoreDone.
        hwCurrentCompletedId = pScrubber->vgpuScrubBuffRing.pScrubBuffRingHeader->lastSWSemaphoreDone;
        lastSWSemaphoreDone  = pScrubber->lastSWSemaphoreDone;

        if (hwCurrentCompletedId == (NvU32)lastSWSemaphoreDone)
            return lastSWSemaphoreDone;

        // check for wrap around case. Increment the upper 32 bits
        if (hwCurrentCompletedId < (NvU32)lastSWSemaphoreDone)
        {
            lastSWSemaphoreDone += 0x100000000ULL;
        }

        // update lower 32 bits
        lastSWSemaphoreDone &= 0xFFFFFFFF00000000ULL;
        lastSWSemaphoreDone |= (NvU64)hwCurrentCompletedId;

    }
    else
    {
        lastSWSemaphoreDone = ceutilsUpdateProgress(pScrubber->pCeUtils);
    }

    pScrubber->lastSWSemaphoreDone = lastSWSemaphoreDone;

    return lastSWSemaphoreDone;
}


/** Single function to memset a surface mapped by GPU. This interface supports
    both sysmem and vidmem surface, since it uses CE to memset a surface.
    The user is notified by releasing semaphore with value "payload"
 */
static NV_STATUS
_scrubMemory
(
    OBJMEMSCRUB *pScrubber,
    RmPhysAddr   base,
    NvU64        size,
    NvU32        dstCpuCacheAttrib,
    NvU32        payload
)
{
    NV_STATUS status = NV_OK;
    MEMORY_DESCRIPTOR *pMemDesc = NULL;
    CEUTILS_MEMSET_PARAMS memsetParams = {0};

    // Describe the FB range to scrub with a temporary memory descriptor.
    status = memdescCreate(&pMemDesc, pScrubber->pGpu, size, 0, NV_TRUE,
                           ADDR_FBMEM, dstCpuCacheAttrib, MEMDESC_FLAGS_NONE);
    NV_ASSERT_OR_RETURN(status == NV_OK, status);

    memdescDescribe(pMemDesc, ADDR_FBMEM, base, size);

    memsetParams.pMemDesc = pMemDesc;
    memsetParams.length   = size;
    memsetParams.flags    = NV0050_CTRL_MEMSET_FLAGS_ASYNC | NV0050_CTRL_MEMSET_FLAGS_PIPELINED;

    status = ceutilsMemset(pScrubber->pCeUtils, &memsetParams);
    if (status == NV_OK)
    {
        pScrubber->lastSubmittedWorkId = memsetParams.submittedWorkId;
    }

    // The memdesc only describes the range; the CE work remains in flight
    // after it is destroyed here.
    memdescDestroy(pMemDesc);
    return status;
}