1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /********************************* DMA Manager *****************************\
25 *                                                                           *
26 *   Method notifications are handled in this module.  DMA report and OS     *
27 *   action are dealt with on a per-object basis.                            *
28 *                                                                           *
29 ****************************************************************************/
30 
31 #include "core/core.h"
32 #include "gpu/gpu.h"
33 #include "gpu/mem_mgr/virt_mem_allocator.h"
34 #include "gpu/mem_mgr/virt_mem_allocator_common.h"
35 #include "gpu/mem_mgr/context_dma.h"
36 #include "os/os.h"
37 #include "objtmr.h"
38 #include "gpu/device/device.h"
39 #include "gpu/bus/kern_bus.h"
40 #include "gpu/mem_mgr/mem_mgr.h"
41 
42 //---------------------------------------------------------------------------
43 //
44 //  Notification completion.
45 //
46 //---------------------------------------------------------------------------
47 
48 void notifyMethodComplete
49 (
50     OBJGPU   *pGpu,
51     ChannelDescendant *pObject,
52     NvU32     Offset,
53     NvV32     Data,
54     NvU32     info32,
55     NvU16     info16,
56     NV_STATUS CompletionStatus
57 )
58 {
59     if (pObject->bNotifyTrigger)
60     {
61         pObject->bNotifyTrigger = NV_FALSE;
62 
63         //
64         // Do any OS specified action related to this notification.
65         //
66         if (pObject->notifyAction)
67         {
68             PEVENTNOTIFICATION pEventNotifications = inotifyGetNotificationList(staticCast(pObject, INotifier));
69             notifyEvents(pGpu, pEventNotifications, 0, Offset, Data, CompletionStatus, pObject->notifyAction);
70         }
71     }
72 }
73 
74 static NV_STATUS notifyWriteNotifier
75 (
76     OBJGPU    *pGpu,
77     ContextDma *NotifyXlate,
78     NvV32      Info32,
79     NvV16      Info16,
80     NV_STATUS  CompletionStatus,
81     NvU64      Offset,
82     NvBool     TimeSupplied,
83     NvU64      Time
84 )
85 {
86     NV_STATUS status;
87     NOTIFICATION *pNotifyBuffer;
88 
89     //
90     // Fill in the notification structure.
91     //
92     status = ctxdmaGetKernelVA( NotifyXlate, Offset, sizeof(*pNotifyBuffer),
93         (void **)&(pNotifyBuffer),
94         gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu)));
95 
96     if (status != NV_OK)
97     {
98         return status;
99     }
100 
101     notifyFillNOTIFICATION(pGpu, pNotifyBuffer, Info32, Info16,
102                            CompletionStatus, TimeSupplied, Time);
103     return status;
104 }
105 
106 void
107 notifyFillNOTIFICATION
108 (
109     OBJGPU       *pGpu,
110     NOTIFICATION *pNotifyBuffer,
111     NvV32         Info32,
112     NvV16         Info16,
113     NV_STATUS     CompletionStatus,
114     NvBool        TimeSupplied,
115     NvU64         Time
116 )
117 {
118     INFO16_STATUS infoStatus;
119     NvU32         TimeHi, TimeLo;
120 
121     if (!TimeSupplied)
122     {
123         OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
124         tmrGetCurrentTime(pTmr, &Time);
125     }
126 
127     TimeLo = NvU64_LO32(Time);
128     TimeHi = NvU64_HI32(Time);
129 
130     //
131     // Since notifiers are not read by the GPU, and only the CPU, these
132     // writes to not need to be flushed. A subsequent CPU read to this data
133     // will be serialized with these writes
134     //
135     MEM_WR32(&pNotifyBuffer->OtherInfo32, Info32);
136     MEM_WR32(&pNotifyBuffer->TimeHi, TimeHi);
137     MEM_WR32(&pNotifyBuffer->TimeLo, TimeLo);
138 
139     //
140     // Combine into 32b write to avoid issues in environments that don't
141     // support 16b writes.  For example, when routing all memory requests
142     // through IFB we are limited to 32b read/writes only.
143     //
144     infoStatus.Info16Status_16.Status = (NvV16) CompletionStatus;
145     infoStatus.Info16Status_16.OtherInfo16 = Info16;
146     MEM_WR32(&pNotifyBuffer->Info16Status.Info16Status_32,
147              infoStatus.Info16Status_32);
148 }
149 
150 void
151 notifyFillNvNotification
152 (
153     OBJGPU         *pGpu,
154     NvNotification *pNotification,
155     NvV32           Info32,
156     NvV16           Info16,
157     NV_STATUS       CompletionStatus,
158     NvBool          TimeSupplied,
159     NvU64           Time
160 )
161 {
162     NvU32 TimeHi, TimeLo;
163 
164     if (!TimeSupplied)
165     {
166         OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
167         tmrGetCurrentTime(pTmr, &Time);
168     }
169 
170     TimeLo = NvU64_LO32(Time);
171     TimeHi = NvU64_HI32(Time);
172 
173     //
174     // Since notifiers are not read by the GPU, and only the CPU, these
175     // writes do not need to be flushed. A subsequent CPU read to this data
176     // will be serialized with these writes
177     //
178     MEM_WR16(&pNotification->info16, Info16);
179     MEM_WR32(&pNotification->info32, Info32);
180     MEM_WR32(&pNotification->timeStamp.nanoseconds[0], TimeHi);
181     MEM_WR32(&pNotification->timeStamp.nanoseconds[1], TimeLo);
182     MEM_WR16(&pNotification->status, CompletionStatus);
183 }
184 
185 NV_STATUS notifyFillNotifier
186 (
187     OBJGPU    *pGpu,
188     ContextDma *NotifyXlate,
189     NvV32      Info32,
190     NvV16      Info16,
191     NV_STATUS  CompletionStatus
192 )
193 {
194     return notifyWriteNotifier(pGpu, NotifyXlate, Info32,
195                                Info16, CompletionStatus,
196                                0, NV_FALSE, 0);
197 }
198 
199 NV_STATUS notifyFillNotifierOffsetTimestamp
200 (
201     OBJGPU    *pGpu,
202     ContextDma *NotifyXlate,
203     NvV32      Info32,
204     NvV16      Info16,
205     NV_STATUS  CompletionStatus,
206     NvU64      Offset,
207     NvU64      Time
208 )
209 {
210     return notifyWriteNotifier(pGpu, NotifyXlate, Info32,
211                                Info16, CompletionStatus,
212                                Offset,
213                                NV_TRUE, Time);
214 }
215 
216 NV_STATUS notifyFillNotifierOffset
217 (
218     OBJGPU    *pGpu,
219     ContextDma *NotifyXlate,
220     NvV32      Info32,
221     NvV16      Info16,
222     NV_STATUS  CompletionStatus,
223     NvU64      Offset
224 )
225 {
226     return notifyWriteNotifier(pGpu, NotifyXlate, Info32,
227                                Info16, CompletionStatus,
228                                Offset,
229                                NV_FALSE, 0);
230 }
231 
232 NV_STATUS notifyFillNotifierArrayTimestamp
233 (
234     OBJGPU    *pGpu,
235     ContextDma *NotifyXlate,
236     NvV32      Info32,
237     NvV16      Info16,
238     NV_STATUS  CompletionStatus,
239     NvU32      Index,
240     NvU64      Time
241 )
242 {
243     return notifyWriteNotifier(pGpu, NotifyXlate, Info32,
244                                Info16, CompletionStatus,
245                                Index * sizeof(NOTIFICATION),
246                                NV_TRUE, Time);
247 }
248 
249 NV_STATUS notifyFillNotifierArray
250 (
251     OBJGPU    *pGpu,
252     ContextDma *NotifyXlate,
253     NvV32      Info32,
254     NvV16      Info16,
255     NV_STATUS  CompletionStatus,
256     NvU32      Index
257 )
258 {
259     return notifyWriteNotifier(pGpu, NotifyXlate, Info32,
260                                Info16, CompletionStatus,
261                                Index * sizeof(NOTIFICATION),
262                                NV_FALSE, 0);
263 }
264 
265 /*
266  * @brief fills notifier at GPU VA base + index with given info,
267  * time and completion status
268  *
269  * Looks up dma memory mapping with given GPU VA and performs writes.
270  * Notifier write is skipped when CPU kernel mapping is missing.
271  *
272  * @param[in] pGpu              OBJGPU pointer
273  * @param[in] hClient           NvU32 client handle
274  * @param[in] hMemoryCtx        Handle of a memory object to which NotifyGPUVABase belongs
275  * @param[in] NotifyGPUVABase   64b GPU VA base address of semaphore
276  * @param[in] Info32            32b info part
277  * @param[in] Info16            16b info part
278  * @param[in] CompletionStatus  NV_STATUS value to write to notifier status
279  * @param[in] Index             index of notifier in notifier array
280  * @param[in] Time              64b time stamp
281  *
282  * @return NV_ERR_INVALID_ADDRESS on wrong GPU VA address or out of bound index,
283  *         NV_OK on success
284  *
285  */
286 NV_STATUS notifyFillNotifierGPUVATimestamp
287 (
288     OBJGPU    *pGpu,
289     RsClient  *pClient,
290     NvHandle   hMemoryCtx,
291     NvU64      NotifyGPUVABase,
292     NvV32      Info32,
293     NvV16      Info16,
294     NV_STATUS  CompletionStatus,
295     NvU32      Index,
296     NvU64      Time
297 )
298 {
299     NvU64                 notifyGPUVA;
300     NvBool                bFound;
301     CLI_DMA_MAPPING_INFO *pDmaMappingInfo;
302     NvU64                 offset;
303     NvU32                 subdeviceInstance;
304     NOTIFICATION         *pNotifier;
305     Device               *pDevice;
306     NV_STATUS             status;
307 
308     status = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice);
309     if (status != NV_OK)
310         return status;
311 
312     notifyGPUVA = NotifyGPUVABase + (Index * sizeof(NOTIFICATION));
313 
314     // Memory context is required for mapping lookup
315     bFound = CliGetDmaMappingInfo(pClient,
316                                   RES_GET_HANDLE(pDevice),
317                                   hMemoryCtx,
318                                   notifyGPUVA,
319                                   gpumgrGetDeviceGpuMask(pGpu->deviceInstance),
320                                   &pDmaMappingInfo);
321     if (!bFound)
322     {
323         NV_PRINTF(LEVEL_ERROR, "Can't find mapping; notifier not written\n");
324         return NV_ERR_INVALID_ADDRESS;
325     }
326 
327     offset = notifyGPUVA - pDmaMappingInfo->DmaOffset;
328     if ((offset + sizeof(NOTIFICATION)) > pDmaMappingInfo->pMemDesc->Size)
329     {
330         NV_PRINTF(LEVEL_ERROR,
331                   "offset+size doesn't fit into mapping; notifier not written\n");
332         return NV_ERR_INVALID_ADDRESS;
333     }
334 
335     //
336     // Set idx to default position in the dma mapped address array
337     //
338     subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu));
339 
340     SLI_LOOP_START(SLI_LOOP_FLAGS_NONE)
341 
342     if (IsSLIEnabled(pGpu) &&
343         (memdescGetAddressSpace(pDmaMappingInfo->pMemDesc) == ADDR_FBMEM))
344     {
345         //
346         // If SLI and it is vidmem, replace idx with appropriate SLI index
347         // otherwise, this just stays the default value.
348         //
349         subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
350     }
351 
352     if (!pDmaMappingInfo->KernelVAddr[subdeviceInstance])
353     {
354         NV_PRINTF(LEVEL_ERROR, "KernelVAddr==NULL; notifier not written\n");
355     }
356     else
357     {
358         pNotifier = (PNOTIFICATION)((NvU8*)pDmaMappingInfo->KernelVAddr[subdeviceInstance] + offset);
359 
360         notifyFillNOTIFICATION(pGpu, pNotifier, Info32, Info16,
361                                CompletionStatus, NV_TRUE, Time);
362     }
363 
364     SLI_LOOP_END
365 
366     return NV_OK;
367 }
368 
369 /*
370  * @brief fills notifier at GPU VA base + index with current time, given info,
371  * and completion status
372  *
373  * Use this function to fill notifier through BAR1 when you have GPU VA.
374  *
375  * Wrapper for notifyFillNotifierGPUVATimestamp.
376  * Gets current time and routes data to notifyFillNotifierGPUVATimestamp
377  *
378  * @param[in] pGpu              OBJGPU pointer
379  * @param[in] hClient           NvU32 client handle
380  * @param[in] hMemoryCtx        Handle of a memory object to which NotifyGPUVABase belongs
381  * @param[in] NotifyGPUVABase   64b GPU VA base address of semaphore
382  * @param[in] Info32            32b info part
383  * @param[in] Info16            16b info part
384  * @param[in] CompletionStatus  NV_STATUS value to write to notifier status
385  * @param[in] Index             index of notifier in notifier array
386  * @param[in] Time              64b time stamp
387  *
388  * @return status of notifyFillNotifierGPUVATimestamp
389  */
390 NV_STATUS notifyFillNotifierGPUVA
391 (
392     OBJGPU    *pGpu,
393     RsClient  *pClient,
394     NvHandle   hMemoryCtx,
395     NvU64      NotifyGPUVABase,
396     NvV32      Info32,
397     NvV16      Info16,
398     NV_STATUS  CompletionStatus,
399     NvU32      Index
400 )
401 {
402     OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
403     NvU64   Time;
404 
405     tmrGetCurrentTime(pTmr, &Time);
406 
407     return notifyFillNotifierGPUVATimestamp(pGpu,
408                                             pClient,
409                                             hMemoryCtx,
410                                             NotifyGPUVABase,
411                                             Info32,
412                                             Info16,
413                                             CompletionStatus,
414                                             Index,
415                                             Time);
416 }
417 
418 /*
419  * @brief fills notifiers by given memory info and index with given time, info,
420  * and completion status
421  *
422  * Use this function to fill notifier through BAR2 when you have memory info.
423  *
424 
425  * @param[in] pGpu              OBJGPU pointer
426  * @param[in] hClient           NvU32 client handle
427  * @param[in] NotifyGPUVABase   64b GPU VA base address of semaphore
428  * @param[in] Info32            32b info part
429  * @param[in] Info16            16b info part
430  * @param[in] CompletionStatus  NV_STATUS value to write to notifier status
431  * @param[in] Index             index of notifier in notifier array
432  *
433  * @return NV_ERR_GENERIC if RM aperture mapping failed.
434  */
435 NV_STATUS notifyFillNotifierMemoryTimestamp
436 (
437     OBJGPU       *pGpu,
438     Memory       *pMemory,
439     NvV32         Info32,
440     NvV16         Info16,
441     NV_STATUS     CompletionStatus,
442     NvU32         Index,
443     NvU64         Time
444 )
445 {
446     NvNotification * pDebugNotifier = NULL;
447     MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
448     KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
449     TRANSFER_SURFACE surf = {0};
450 
451     //
452     // Check if there's already a CPU mapping we can use. If not, attempt to
453     // map the notifier, which may fail if we're in a context where we can't
454     // create mappings.
455     //
456     pDebugNotifier = (NvNotification *)((NvUPtr)pMemory->KernelVAddr);
457     if (pDebugNotifier == NULL)
458     {
459         surf.pMemDesc = pMemory->pMemDesc;
460         surf.offset = Index * sizeof(NvNotification);
461 
462         pDebugNotifier =
463             (NvNotification *) memmgrMemBeginTransfer(pMemoryManager, &surf,
464                                                       sizeof(NvNotification),
465                                                       TRANSFER_FLAGS_SHADOW_ALLOC);
466         NV_ASSERT_OR_RETURN(pDebugNotifier != NULL, NV_ERR_INVALID_STATE);
467     }
468     else
469     {
470         //
471         // If a CPU pointer has been passed by caller ensure that the notifier
472         // is in sysmem or in case it in vidmem, BAR access to the same is not
473         // blocked (for HCC)
474         //
475         NV_ASSERT_OR_RETURN(
476             memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_SYSMEM ||
477             !kbusIsBarAccessBlocked(pKernelBus), NV_ERR_INVALID_ARGUMENT);
478         pDebugNotifier = &pDebugNotifier[Index];
479     }
480 
481     notifyFillNvNotification(pGpu, pDebugNotifier, Info32, Info16,
482                              CompletionStatus, NV_TRUE, Time);
483 
484     if (pMemory->KernelVAddr == NvP64_NULL)
485     {
486         memmgrMemEndTransfer(pMemoryManager, &surf, sizeof(NvNotification), 0);
487     }
488 
489     return NV_OK;
490 }
491 
492 /*
493  * @brief fills notifiers by given memory info and index with current time,
494  * info and completion status.
495  *
496  * Use this function to fill notifier through BAR2 when you have memory info.
497  *
498  * Current time wrapper around notifyFillNotifierMemoryTimestamp.
499  *
500  * @param[in] pGpu              OBJGPU pointer
501  * @param[in] hClient           NvU32 client handle
502  * @param[in] NotifyGPUVABase   64b GPU VA base address of semaphore
503  * @param[in] Info32            32b info part
504  * @param[in] Info16            16b info part
505  * @param[in] CompletionStatus  NV_STATUS value to write to notifier status
506  * @param[in] Index             index of notifier in notifier array
507  *
508  * @return status of notifyFillNotifierMemoryTimestamp
509  */
510 NV_STATUS notifyFillNotifierMemory
511 (
512     OBJGPU    *pGpu,
513     Memory    *pMemory,
514     NvV32      Info32,
515     NvV16      Info16,
516     NV_STATUS  CompletionStatus,
517     NvU32      Index
518 )
519 {
520     OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
521     NvU64   Time;
522 
523     tmrGetCurrentTime(pTmr, &Time);
524 
525     return notifyFillNotifierMemoryTimestamp(pGpu,
526                                              pMemory,
527                                              Info32,
528                                              Info16,
529                                              CompletionStatus,
530                                              Index,
                                             Time);
}
534 
535 /*
536  * @brief fill semaphore structure at GPU VA base given time and release value
537  *
538  * Looks up dma memory mapping with given GPU VA and performs writes.
539  * Semaphore write is skipped when CPU kernel mapping is missing.
540  *
541  * @param[in] pGpu                  OBJGPU pointer
542  * @param[in] hClient               NvU32 client handle
543  * @param[in] SemaphoreGPUVABase    64b GPU VA base address of semaphore
544  * @param[in] ReleaseValue          NvU32 value to write to semaphore upon release
545  * @param[in] Index                 index of semaphore in semaphore array
546  * @param[in] Time                  64b time stamp
547  *
548  * @return NV_ERR_INVALID_ADDRESS on wrong GPU VA address or out of bound index,
549  *         NV_OK on success
550  *
551  */
552 NV_STATUS semaphoreFillGPUVATimestamp
553 (
554     OBJGPU    *pGpu,
555     RsClient  *pClient,
556     NvHandle   hMemCtx,
557     NvU64      SemaphoreGPUVABase,
558     NvV32      ReleaseValue,
559     NvU32      Index,
560     NvBool     bBroadcast,
561     NvU64      Time
562 )
563 {
564     NvU64                 semaphoreGPUVA;
565     NvU64                 semaphoreGPUVAOffset;
566     CLI_DMA_MAPPING_INFO *pDmaMappingInfo;
567     NvU64                 offset;
568     NvU32                 timeHi, timeLo;
569     NvU32                 subdeviceInstance;
570     NvGpuSemaphore       *pSemaphore;
571     NvBool                bBcState = gpumgrGetBcEnabledStatus(pGpu);
572     NvBool                bFound;
573     Device               *pDevice;
574     NV_STATUS             status;
575 
576     status = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice);
577     if (status != NV_OK)
578         return status;
579 
580     if (!portSafeMulU64((NvU64) Index,
581                         (NvU64) sizeof(NvGpuSemaphore),
582                         &semaphoreGPUVAOffset) ||
583         !portSafeAddU64(SemaphoreGPUVABase,
584                         semaphoreGPUVAOffset,
585                         &semaphoreGPUVA))
586     {
587         return NV_ERR_INVALID_ARGUMENT;
588     }
589 
590     bFound = CliGetDmaMappingInfo(pClient,
591                                   RES_GET_HANDLE(pDevice),
592                                   hMemCtx,
593                                   semaphoreGPUVA,
594                                   gpumgrGetDeviceGpuMask(pGpu->deviceInstance),
595                                   &pDmaMappingInfo);
596     if (!bFound)
597     {
598         NV_PRINTF(LEVEL_ERROR, "Can't find mapping; semaphore not released\n");
599         return NV_ERR_INVALID_ADDRESS;
600     }
601 
602     offset = semaphoreGPUVA - pDmaMappingInfo->DmaOffset;
603     if ((offset + sizeof(NvGpuSemaphore)) > pDmaMappingInfo->pMemDesc->Size)
604     {
605         NV_PRINTF(LEVEL_ERROR,
606                   "offset+size doesn't fit into mapping; semaphore not released\n");
607         return NV_ERR_INVALID_ADDRESS;
608     }
609 
610     timeLo = NvU64_LO32(Time);
611     timeHi = NvU64_HI32(Time);
612 
613     //
614     // Set idx to default position in the dma mapped address array
615     //
616     subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu));
617 
618     osFlushCpuWriteCombineBuffer();
619 
620     gpumgrSetBcEnabledStatus(pGpu, bBroadcast);
621     SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
622 
623     if (IsSLIEnabled(pGpu) &&
624         (memdescGetAddressSpace(pDmaMappingInfo->pMemDesc) == ADDR_FBMEM))
625     {
626         //
627         // If SLI and it is vidmem, replace idx with appropriate SLI index
628         // otherwise, this just stays the default value.
629         //
630         subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
631     }
632 
633     if (!pDmaMappingInfo->KernelVAddr[subdeviceInstance])
634     {
635         NV_PRINTF(LEVEL_ERROR, "KernelVAddr==NULL; semaphore not released\n");
636     }
637     else
638     {
639         pSemaphore = (NvGpuSemaphore*)((NvU8*)pDmaMappingInfo->KernelVAddr[subdeviceInstance] + offset);
640 
641         MEM_WR32(&(pSemaphore->timeStamp.nanoseconds[0]), timeLo);
642         MEM_WR32(&(pSemaphore->timeStamp.nanoseconds[1]), timeHi);
643         MEM_WR32(&(pSemaphore->data[0]), ReleaseValue);
644     }
645 
646     SLI_LOOP_END
647 
648     gpumgrSetBcEnabledStatus(pGpu, bBcState);
649     osFlushCpuWriteCombineBuffer();
650 
651     return NV_OK;
652 }
653 
654 /*
655  * @brief fill semaphore at GPU VA with given release value and current time stamp
656  *
657  * Use this function to fill Semaphore through BAR1 when you have GPU VA.
658  *
659  * Wrapper for semaphore handling. Gets current time and routes data to
660  * semaphoreFillGPUVATimestamp.
661  *
662  * @param[in] pGpu                  OBJGPU pointer
663  * @param[in] hClient               NvU32 client handle
664  * @param[in] SemaphoreGPUVABase    64b GPU VA base address of semaphore
665  * @param[in] ReleaseValue          NvU32 value to write to semaphore upon release
666  * @param[in] Index                 index of semaphore in semaphore array
667  *
668  * @return status of semaphoreFillGPUVATimestamp
669  */
670 NV_STATUS semaphoreFillGPUVA
671 (
672     OBJGPU    *pGpu,
673     RsClient  *pClient,
674     NvHandle   hMemCtx,
675     NvU64      SemaphoreGPUVABase,
676     NvV32      ReleaseValue,
677     NvU32      Index,
678     NvBool     bBroadcast
679 )
680 {
681     OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
682     NvU64   Time;
683 
684     tmrGetCurrentTime(pTmr, &Time);
685 
686     return semaphoreFillGPUVATimestamp(pGpu,
687                                        pClient,
688                                        hMemCtx,
689                                        SemaphoreGPUVABase,
690                                        ReleaseValue,
691                                        Index,
692                                        bBroadcast,
693                                        Time);
694 }
695