/*
 * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "rmapi/event_buffer.h"
#include "os/os.h"
#include "gpu/mem_mgr/mem_desc.h"
#include "core/locks.h"
#include "gpu/gpu.h"
#include "gpu/subdevice/subdevice.h"
#include "gpu_mgr/gpu_mgr.h"
#include "gpu/mem_mgr/mem_mgr.h"
#include "rmapi/rs_utils.h"
#include "rmapi/rmapi_utils.h"
#include "kernel/gpu/gr/fecs_event_list.h"
#include "mem_mgr/no_device_mem.h"
#include "class/cl90ce.h"
#include "class/cl0040.h"

static NV_STATUS _allocAndMapMemory(CALL_CONTEXT *pCallContext, NvP64 pAddress, MEMORY_DESCRIPTOR** ppMemDesc, NvU64 size, NvBool bKernel,
    NvP64* pKernelAddr, NvP64* pKernelPriv, NvP64* pUserAddr, NvP64* pUserPriv);

static void _unmapAndFreeMemory(MEMORY_DESCRIPTOR *pMemDesc, NvBool bKernel, NvP64 kernelAddr,
    NvP64 kernelPriv, NvP64 userAddr, NvP64 userPriv);
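
/*
 * Constructs an event buffer.
 *
 * The buffer header, record buffer, and optional vardata buffer are either
 * allocated internally by RM (pAllocParams->hBufferHeader == 0) or supplied
 * by the client as pre-allocated memory objects. Client-supplied buffers are
 * mapped into kernel VA space here; internally allocated buffers are also
 * mapped into the client's VA space, and those mappings are returned through
 * pAllocParams.
 */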
NV_STATUS
eventbufferConstruct_IMPL
(
    EventBuffer                  *pEventBuffer,
    CALL_CONTEXT                 *pCallContext,
    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
)
{
    NV_STATUS                         status;
    NV_EVENT_BUFFER_ALLOC_PARAMETERS *pAllocParams   = pParams->pAllocParams;

    EVENT_BUFFER_MAP_INFO            *pKernelMap     = &pEventBuffer->kernelMapInfo;
    EVENT_BUFFER_MAP_INFO            *pClientMap     = &pEventBuffer->clientMapInfo;

    NvU32                             hClient        = pCallContext->pClient->hClient;
    NvBool                            bKernel        = (rmclientGetCachedPrivilegeByHandle(hClient) >= RS_PRIV_LEVEL_KERNEL);

    NvU32                             recordBufferSize;
    NvP64                             kernelNotificationhandle;
    Subdevice                        *pSubdevice     = NULL;
    NvBool                            bInternalAlloc = (pAllocParams->hBufferHeader == 0);
    NvBool                            bNoDeviceMem   = NV_FALSE;
    NvBool                            bUsingVgpuStagingBuffer = NV_FALSE;
    OBJGPU                           *pGpu           = NULL;
    RsResourceRef                    *pHeaderRef     = NULL;
    RsResourceRef                    *pRecordRef     = NULL;
    RsResourceRef                    *pVardataRef    = NULL;
    NvHandle                          hMapperClient  = 0;
    NvHandle                          hMapperDevice  = 0;

    pAllocParams->bufferHeader  = NvP64_NULL;
    pAllocParams->recordBuffer  = NvP64_NULL;
    pAllocParams->vardataBuffer = NvP64_NULL;

    if (bInternalAlloc)
    {
        OBJSYS *pSys = SYS_GET_INSTANCE();
        NvBool bSupported = pSys->getProperty(pSys, PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED);
        NV_ASSERT_OR_RETURN(bSupported, NV_ERR_NOT_SUPPORTED);
    }
    else
    {
        NV_ASSERT_OR_RETURN((pAllocParams->hRecordBuffer != 0), NV_ERR_INVALID_ARGUMENT);
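
        // A vardata buffer handle must be supplied if and only if a nonzero
        // vardata buffer size is requested.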
        NV_ASSERT_OR_RETURN(((pAllocParams->vardataBufferSize == 0) ^ (pAllocParams->hVardataBuffer != 0)),
                          NV_ERR_INVALID_ARGUMENT);

        status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hBufferHeader, &pHeaderRef);
        if (status != NV_OK)
            return status;

        status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hRecordBuffer, &pRecordRef);
        if (status != NV_OK)
            return status;

        // Avoid mixing and matching backing memory
        if (pRecordRef->externalClassId != pHeaderRef->externalClassId)
            return NV_ERR_INVALID_ARGUMENT;

        if (pAllocParams->hVardataBuffer != 0)
        {
            status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hVardataBuffer, &pVardataRef);
            if (status != NV_OK)
                return status;

            if (pVardataRef->externalClassId != pHeaderRef->externalClassId)
                return NV_ERR_INVALID_ARGUMENT;
        }

        bNoDeviceMem = (pRecordRef->externalClassId == NV01_MEMORY_DEVICELESS);

        if (!bNoDeviceMem)
        {
            if (pAllocParams->hSubDevice == 0)
            {
                NV_PRINTF(LEVEL_WARNING, "hSubDevice must be provided.\n");
                return NV_ERR_INVALID_ARGUMENT;
            }
        }
    }

    // Bound-check the inputs; portSafeMulU32 also rejects multiplication overflow
    if ((pAllocParams->recordSize == 0) || (pAllocParams->recordCount == 0) ||
        (!portSafeMulU32(pAllocParams->recordSize, pAllocParams->recordCount, &recordBufferSize)) ||
        (pAllocParams->recordsFreeThreshold > pAllocParams->recordCount) ||
        (pAllocParams->vardataFreeThreshold > pAllocParams->vardataBufferSize))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    pEventBuffer->hClient = pCallContext->pClient->hClient;
    pEventBuffer->hSubDevice = pAllocParams->hSubDevice;
    if (pEventBuffer->hSubDevice)
    {
        status = subdeviceGetByHandle(pCallContext->pClient, pEventBuffer->hSubDevice, &pSubdevice);
        if (status != NV_OK)
            return NV_ERR_INVALID_OBJECT_HANDLE;

        pEventBuffer->subDeviceInst = pSubdevice->subDeviceInst;
        pGpu = GPU_RES_GET_GPU(pSubdevice);

        if (!bNoDeviceMem)
        {
            if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
            {
                // Staging buffer should be mapped as read-only in guest RM
                bUsingVgpuStagingBuffer = NV_TRUE;
            }

            if (!bKernel)
            {
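                //
                // For user-mode clients, create an internal client/device pair
                // so the kernel CPU mappings below are made under an RM-owned
                // client rather than the user-mode client.
                //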
                RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
                status = rmapiutilAllocClientAndDeviceHandles(pRmApi,
                        pGpu,
                        &pEventBuffer->hInternalClient,
                        &pEventBuffer->hInternalDevice,
                        &pEventBuffer->hInternalSubdevice);

                if (status != NV_OK)
                    return status;

                hMapperClient = pEventBuffer->hInternalClient;
                hMapperDevice = pEventBuffer->hInternalDevice;
            }
            else
            {
                hMapperClient = pCallContext->pClient->hClient;
                hMapperDevice = RES_GET_PARENT_HANDLE(pSubdevice);
            }
        }
    }

    //
    // Use 'goto cleanup' on failure below here
    //

    if (!bInternalAlloc)
    {
        Memory *pMemory;
        NvBool bRequireReadOnly = bUsingVgpuStagingBuffer || !bKernel;

        //
        // Buffer header
        //
        pEventBuffer->pHeader = dynamicCast(pHeaderRef->pResource, Memory);
        pMemory = pEventBuffer->pHeader;
        if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY)))
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto cleanup;
        }

        if (pMemory->Length < sizeof(NV_EVENT_BUFFER_HEADER))
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto cleanup;
        }

        if (!bNoDeviceMem)
        {
            RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
            NvHandle hMemory = RES_GET_HANDLE(pMemory);

            if ((!bKernel) && bUsingVgpuStagingBuffer)
            {
                status = pRmApi->DupObject(pRmApi,
                                           hMapperClient,
                                           hMapperDevice,
                                           &hMemory,
                                           pCallContext->pClient->hClient,
                                           hMemory, 0);
                if (status != NV_OK)
                {
                    goto cleanup;
                }
            }

            status = pRmApi->MapToCpu(pRmApi,
                                      hMapperClient,
                                      hMapperDevice,
                                      hMemory,
                                      0,
                                      pMemory->Length,
                                      &pKernelMap->headerAddr,
                                      bUsingVgpuStagingBuffer
                                          ? DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY)
                                          : DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE));

            if (status != NV_OK)
            {
                goto cleanup;
            }
        }
        else
        {
            status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE);
            if (status != NV_OK)
                goto cleanup;

            pKernelMap->headerAddr = pMemory->KernelVAddr;
        }

        //
        // Record buffer
        //
        pEventBuffer->pRecord = dynamicCast(pRecordRef->pResource, Memory);
        pMemory = pEventBuffer->pRecord;
        if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY)))
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto cleanup;
        }

        if (pMemory->Length < recordBufferSize)
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto cleanup;
        }

        if (!bNoDeviceMem)
        {
            RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
            NvHandle hMemory = RES_GET_HANDLE(pMemory);

            if ((!bKernel) && bUsingVgpuStagingBuffer)
            {
                status = pRmApi->DupObject(pRmApi,
                                           hMapperClient,
                                           hMapperDevice,
                                           &hMemory,
                                           pCallContext->pClient->hClient,
                                           hMemory, 0);
                if (status != NV_OK)
                {
                    goto cleanup;
                }
            }

            status = pRmApi->MapToCpu(pRmApi,
                                      hMapperClient,
                                      hMapperDevice,
                                      hMemory,
                                      0,
                                      pMemory->Length,
                                      &pKernelMap->recordBuffAddr,
                                      bUsingVgpuStagingBuffer
                                          ? DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY)
                                          : DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE));
            if (status != NV_OK)
            {
                goto cleanup;
            }
        }
        else
        {
            status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE);
            if (status != NV_OK)
                goto cleanup;

            pKernelMap->recordBuffAddr = pMemory->KernelVAddr;
        }

        //
        // Vardata buffer [optional]
        //
        if (pAllocParams->hVardataBuffer != 0)
        {
            pEventBuffer->pVardata = dynamicCast(pVardataRef->pResource, Memory);
            pMemory = pEventBuffer->pVardata;
            if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY)))
            {
                status = NV_ERR_INVALID_ARGUMENT;
                goto cleanup;
            }

            if (pMemory->Length < pAllocParams->vardataBufferSize)
            {
                status = NV_ERR_INVALID_ARGUMENT;
                goto cleanup;
            }

            if (!bNoDeviceMem)
            {
                RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
                NvHandle hMemory = RES_GET_HANDLE(pMemory);

                if ((!bKernel) && bUsingVgpuStagingBuffer)
                {
                    status = pRmApi->DupObject(pRmApi,
                                               hMapperClient,
                                               hMapperDevice,
                                               &hMemory,
                                               pCallContext->pClient->hClient,
                                               hMemory, 0);
                    if (status != NV_OK)
                    {
                        goto cleanup;
                    }
                }

                status = pRmApi->MapToCpu(pRmApi,
                                          hMapperClient,
                                          hMapperDevice,
                                          hMemory,
                                          0,
                                          pMemory->Length,
                                          &pKernelMap->vardataBuffAddr,
                                          bUsingVgpuStagingBuffer
                                            ? DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY)
                                            : DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE));
                if (status != NV_OK)
                {
                    goto cleanup;
                }
            }
            else
            {
                status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE);
                if (status != NV_OK)
                    goto cleanup;

                pKernelMap->vardataBuffAddr = pMemory->KernelVAddr;
            }

            refAddDependant(pVardataRef, pCallContext->pResourceRef);
        }

        refAddDependant(pHeaderRef, pCallContext->pResourceRef);
        refAddDependant(pRecordRef, pCallContext->pResourceRef);
    }

    if (bInternalAlloc)
    {
        status = _allocAndMapMemory(pCallContext,
                                    pAllocParams->bufferHeader,
                                    &pEventBuffer->pHeaderDesc,
                                    sizeof(NV_EVENT_BUFFER_HEADER),
                                    bKernel,
                                    &pKernelMap->headerAddr,
                                    &pKernelMap->headerPriv,
                                    &pClientMap->headerAddr,
                                    &pClientMap->headerPriv);
        if (status != NV_OK)
            goto cleanup;

        status = _allocAndMapMemory(pCallContext,
                                    pAllocParams->recordBuffer,
                                    &pEventBuffer->pRecordBufDesc,
                                    recordBufferSize,
                                    bKernel,
                                    &pKernelMap->recordBuffAddr,
                                    &pKernelMap->recordBuffPriv,
                                    &pClientMap->recordBuffAddr,
                                    &pClientMap->recordBuffPriv);
        if (status != NV_OK)
            goto cleanup;
    }

    eventBufferInitRecordBuffer(&pEventBuffer->producerInfo,
                                KERNEL_POINTER_FROM_NvP64(NV_EVENT_BUFFER_HEADER*, pKernelMap->headerAddr),
                                pKernelMap->recordBuffAddr,
                                pAllocParams->recordSize,
                                pAllocParams->recordCount,
                                recordBufferSize,
                                pAllocParams->recordsFreeThreshold);

    // Not needed for all events, such as FECS context-switch events
    if (pAllocParams->vardataBufferSize != 0)
    {
        if (bInternalAlloc)
        {
            status = _allocAndMapMemory(pCallContext,
                    pAllocParams->vardataBuffer,
                    &pEventBuffer->pVardataBufDesc,
                    pAllocParams->vardataBufferSize,
                    bKernel,
                    &pKernelMap->vardataBuffAddr,
                    &pKernelMap->vardataBuffPriv,
                    &pClientMap->vardataBuffAddr,
                    &pClientMap->vardataBuffPriv);

            if (status != NV_OK)
                goto cleanup;
        }

        eventBufferInitVardataBuffer(&pEventBuffer->producerInfo,
                                     pKernelMap->vardataBuffAddr,
                                     pAllocParams->vardataBufferSize,
                                     pAllocParams->vardataFreeThreshold);
    }

    kernelNotificationhandle = (NvP64)pAllocParams->notificationHandle;
    if (bKernel != NV_TRUE)
        status = osUserHandleToKernelPtr(pCallContext->pClient->hClient,
                                         kernelNotificationhandle,
                                         &kernelNotificationhandle);

    eventBufferInitNotificationHandle(&pEventBuffer->producerInfo, kernelNotificationhandle);
    eventBufferSetEnable(&pEventBuffer->producerInfo, NV_FALSE);

    // Return the user-mode mappings
    pAllocParams->bufferHeader = pClientMap->headerAddr;
    pAllocParams->recordBuffer = pClientMap->recordBuffAddr;
    pAllocParams->vardataBuffer = pClientMap->vardataBuffAddr;

    return NV_OK;

cleanup:
    eventbufferDestruct_IMPL(pEventBuffer);
    return status;
}
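
/*
 * Destructor: dereferences the OS notification handle, removes any FECS
 * event bind-points, tears down the buffer mappings, and frees the internal
 * client if one was allocated at construction time.
 */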
void
eventbufferDestruct_IMPL
(
    EventBuffer *pEventBuffer
)
{
    CALL_CONTEXT          *pCallContext;
    EVENT_BUFFER_MAP_INFO *pClientMap         = &pEventBuffer->clientMapInfo;
    EVENT_BUFFER_MAP_INFO *pKernelMap         = &pEventBuffer->kernelMapInfo;
    NvBool                 bKernel            = rmclientGetCachedPrivilegeByHandle(pEventBuffer->hClient) >= RS_PRIV_LEVEL_KERNEL;
    void                  *notificationHandle = NvP64_VALUE(pEventBuffer->producerInfo.notificationHandle);

    resGetFreeParams(staticCast(pEventBuffer, RsResource), &pCallContext, NULL);

    if (notificationHandle != NULL)
    {
        osDereferenceObjectCount(notificationHandle);
    }

    fecsRemoveAllBindpoints(pEventBuffer);

    _unmapAndFreeMemory(pEventBuffer->pHeaderDesc, bKernel, pKernelMap->headerAddr,
        pKernelMap->headerPriv, pClientMap->headerAddr, pClientMap->headerPriv);

    _unmapAndFreeMemory(pEventBuffer->pRecordBufDesc, bKernel, pKernelMap->recordBuffAddr,
        pKernelMap->recordBuffPriv, pClientMap->recordBuffAddr, pClientMap->recordBuffPriv);

    _unmapAndFreeMemory(pEventBuffer->pVardataBufDesc, bKernel, pKernelMap->vardataBuffAddr,
        pKernelMap->vardataBuffPriv, pClientMap->vardataBuffAddr, pClientMap->vardataBuffPriv);

    if (pEventBuffer->hInternalClient != 0)
    {
        RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
        pRmApi->Free(pRmApi, pEventBuffer->hInternalClient, pEventBuffer->hInternalClient);
    }
}
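
/*
 * Allocates a zero-initialized, physically contiguous system-memory buffer
 * and maps it twice: read-write into kernel VA space for the event producer,
 * and read-only into the requesting client's VA space for the consumer.
 * Mapping at a caller-supplied address (pAddress != NULL) is not supported.
 */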
static NV_STATUS
_allocAndMapMemory
(
    CALL_CONTEXT *pCallContext,
    NvP64 pAddress,
    MEMORY_DESCRIPTOR** ppMemDesc,
    NvU64 size,
    NvBool bKernel,
    NvP64* pKernelAddr,
    NvP64* pKernelPriv,
    NvP64* pUserAddr,
    NvP64* pUserPriv
)
{
    NV_STATUS           status;
    MEMORY_DESCRIPTOR*  pMemDesc = NULL;

    NV_ASSERT_OR_RETURN(pAddress == NvP64_NULL, NV_ERR_NOT_SUPPORTED);

    status = memdescCreate(ppMemDesc, NULL, size, 0, NV_MEMORY_CONTIGUOUS,
            ADDR_SYSMEM, NV_MEMORY_WRITECOMBINED, MEMDESC_FLAGS_CPU_ONLY);
    if (status != NV_OK)
        return status;

    pMemDesc = *ppMemDesc;

    status = osAllocPages(pMemDesc);
    if (status != NV_OK)
        goto cleanup;
    pMemDesc->Allocated = 1;

    // Map the memory into kernel VA space and zero-fill it
    status = memdescMap(pMemDesc, 0, size, NV_TRUE, NV_PROTECT_READ_WRITE,
                        pKernelAddr, pKernelPriv);
    if (status != NV_OK)
        goto cleanup;

    portMemSet(NvP64_VALUE(*pKernelAddr), 0, size);

    // Map the memory read-only into user VA space
    status = memdescMap(pMemDesc, 0, size, bKernel, NV_PROTECT_READABLE,
            pUserAddr, pUserPriv);
    if (status != NV_OK)
        goto cleanup;

    return NV_OK;

cleanup:
    _unmapAndFreeMemory(pMemDesc, bKernel, *pKernelAddr, *pKernelPriv, *pUserAddr, *pUserPriv);
    return status;
}
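
/*
 * Unmaps the user and kernel CPU mappings created by _allocAndMapMemory
 * (when present) and releases the underlying memory descriptor.
 */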
static void
_unmapAndFreeMemory
(
    MEMORY_DESCRIPTOR *pMemDesc,
    NvBool             bKernel,
    NvP64              kernelAddr,
    NvP64              kernelPriv,
    NvP64              userAddr,
    NvP64              userPriv
)
{
    if (pMemDesc == NULL)
        return;

    if (userAddr)
        memdescUnmap(pMemDesc, bKernel, osGetCurrentProcess(), userAddr, userPriv);

    if (kernelAddr)
        memdescUnmap(pMemDesc, NV_TRUE, osGetCurrentProcess(), kernelAddr, kernelPriv);

    memdescFree(pMemDesc);
    memdescDestroy(pMemDesc);
}
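
/*
 * Flushes pending FECS context-switch events into the event buffers of all
 * attached GPUs.
 */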
NV_STATUS
eventbuffertBufferCtrlCmdFlush_IMPL
(
    EventBuffer *pEventBuffer
)
{
    OBJGPU *pGpu;
    NvU32   gpuMask  = 0;
    NvU32   gpuIndex = 0;

    gpumgrGetGpuAttachInfo(NULL, &gpuMask);
    while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex)) != NULL)
    {
        nvEventBufferFecsCallback(pGpu, NULL);
    }
    return NV_OK;
}
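
/*
 * Enables or disables event delivery and selects the buffer overflow policy
 * (keep-newest vs. keep-oldest). When events are being enabled on a buffer
 * with an associated subdevice, the thread's broadcast state is also set up
 * under the GPU group lock.
 */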
NV_STATUS
eventbuffertBufferCtrlCmdEnableEvent_IMPL
(
    EventBuffer *pEventBuffer,
    NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams
)
{
    GPU_MASK  gpuMask;
    NV_STATUS status          = NV_OK;
    NvBool    updateTelemetry = NV_FALSE;

    if (pEnableParams->flags &
        ~(NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST|NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    if (pEnableParams->enable && !pEventBuffer->producerInfo.isEnabled)
    {
        updateTelemetry = NV_TRUE;
    }

    eventBufferSetEnable(&pEventBuffer->producerInfo, pEnableParams->enable);
    if (pEnableParams->flags & NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST)
        eventBufferSetKeepNewest(&pEventBuffer->producerInfo, NV_TRUE);
    else if (pEnableParams->flags & NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST)
        eventBufferSetKeepNewest(&pEventBuffer->producerInfo, NV_FALSE);

    // NvTelemetry requires a valid subdevice
    if (updateTelemetry && pEventBuffer->hSubDevice)
    {
        Subdevice *pSubDevice;

        status = rmGpuGroupLockAcquire(pEventBuffer->subDeviceInst,
                                       GPU_LOCK_GRP_SUBDEVICE,
                                       GPUS_LOCK_FLAGS_NONE,
                                       RM_LOCK_MODULES_GPU, &gpuMask);
        if (status != NV_OK)
            return status;

        status = subdeviceGetByHandle(RES_GET_CLIENT(pEventBuffer),
                pEventBuffer->hSubDevice, &pSubDevice);
        if (status != NV_OK)
        {
            // Don't leak the GPU group lock on the error path
            rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE);
            return status;
        }

        GPU_RES_SET_THREAD_BC_STATE(pSubDevice);

        rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE);
    }
    return NV_OK;
}
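
/*
 * Consumer handshake: validates the client's updated GET offsets for the
 * record and vardata buffers, applies them, and clears the pending
 * notification flag so that a subsequent threshold crossing can raise a new
 * notification.
 */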
NV_STATUS
eventbuffertBufferCtrlCmdUpdateGet_IMPL
(
    EventBuffer *pEventBuffer,
    NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams
)
{
    EVENT_BUFFER_PRODUCER_INFO *pProducerInfo = &pEventBuffer->producerInfo;
    NvP64 pVardataBuf = pEventBuffer->kernelMapInfo.vardataBuffAddr;

    if ((pUpdateParams->recordBufferGet >= eventBufferGetRecordBufferCount(pProducerInfo)) ||
        (pVardataBuf == NvP64_NULL && pUpdateParams->varDataBufferGet > 0) ||
        (pVardataBuf != NvP64_NULL && pUpdateParams->varDataBufferGet >= eventBufferGetVardataBufferCount(pProducerInfo)))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    eventBufferUpdateRecordBufferGet(pProducerInfo, pUpdateParams->recordBufferGet);
    if (pVardataBuf)
        eventBufferUpdateVardataBufferGet(pProducerInfo, pUpdateParams->varDataBufferGet);

    pEventBuffer->bNotifyPending = NV_FALSE;

    return NV_OK;
}

/*
 * eventbuffertBufferCtrlCmdPostTelemetryEvent posts an event to the event
 * buffer for testing purposes.
 *
 * Note: posting an event requires a handle to the buffer. Since that handle
 * is only available to the client that created the buffer, a client can only
 * post events to buffers it created itself; this deliberately limits posting
 * to testing scenarios. If this ever needs to be opened up to other callers,
 * this control call should be moved to the 2080 class, with adjustments made
 * there to acquire the pGpu from the subdevice handle.
 */
NV_STATUS
eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL
(
    EventBuffer *pEventBuffer,
    NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent
)
{
    return NV_ERR_NOT_SUPPORTED;
}
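
/*
 * Producer-side entry point: appends a single event record to the buffer.
 * On return, *pBNotify tells the caller whether to signal the client's
 * notification handle (returned in *pHandle); notification is requested only
 * when the notify threshold has been met and no notification is already
 * pending.
 */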
NV_STATUS
eventBufferAdd
(
    EventBuffer *pEventBuffer,
    void        *pEventData,
    NvU32        recordType,
    NvBool      *pBNotify,
    NvP64       *pHandle
)
{
    EVENT_BUFFER_PRODUCER_DATA *pProducerData = (EVENT_BUFFER_PRODUCER_DATA*)pEventData;
    RECORD_BUFFER_INFO *pRBI;
    NV_EVENT_BUFFER_HEADER *pHeader;

    if (!pEventBuffer->producerInfo.isEnabled)
        return NV_WARN_NOTHING_TO_DO;

    pRBI = &pEventBuffer->producerInfo.recordBuffer;
    pHeader = pRBI->pHeader;

    NV_ASSERT_OR_RETURN(pHeader->recordPut < pRBI->totalRecordCount, NV_ERR_INVALID_STATE);

    eventBufferProducerAddEvent(&pEventBuffer->producerInfo,
        recordType, 0, pProducerData);

    *pBNotify = (!pEventBuffer->bNotifyPending) &&
                (eventBufferIsNotifyThresholdMet(&pEventBuffer->producerInfo));
    *pHandle  = pEventBuffer->producerInfo.notificationHandle;
    return NV_OK;
}