/*
 * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "core/core.h"
#include "gpu/gpu.h"
#include "gpu/mmu/kern_gmmu.h"
#include "gpu/uvm/uvm.h"
#include "gpu/uvm/access_cntr_buffer.h"
#include "kernel/gpu/intr/intr.h"

#include "class/clc365.h"
#include "ctrl/ctrlc365.h"
#include "published/turing/tu102/dev_access_counter.h"
#include "published/turing/tu102/dev_vm.h"

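//
// Register offset accessors for the access counter notification buffer.
// Turing exposes a single access counter buffer in the virtual function
// (VF) register space, so each accessor asserts that index 0 is requested.
//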
NvU32
uvmGetRegOffsetAccessCntrBufferPut_TU102
(
    OBJUVM *pUvm,
    NvU32 accessCounterIndex
)
{
    NV_ASSERT(accessCounterIndex == 0);

    return NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_PUT;
}

NvU32
uvmGetRegOffsetAccessCntrBufferGet_TU102
(
    OBJUVM *pUvm,
    NvU32 accessCounterIndex
)
{
    NV_ASSERT(accessCounterIndex == 0);

    return NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_GET;
}

NvU32
uvmGetRegOffsetAccessCntrBufferHi_TU102
(
    OBJUVM *pUvm,
    NvU32 accessCounterIndex
)
{
    NV_ASSERT(accessCounterIndex == 0);

    return NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_HI;
}

NvU32
uvmGetRegOffsetAccessCntrBufferLo_TU102
(
    OBJUVM *pUvm,
    NvU32 accessCounterIndex
)
{
    NV_ASSERT(accessCounterIndex == 0);

    return NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_LO;
}

NvU32
uvmGetRegOffsetAccessCntrBufferConfig_TU102
(
    OBJUVM *pUvm,
    NvU32 accessCounterIndex
)
{
    NV_ASSERT(accessCounterIndex == 0);

    return NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG;
}

NvU32
uvmGetRegOffsetAccessCntrBufferInfo_TU102
(
    OBJUVM *pUvm,
    NvU32 accessCounterIndex
)
{
    NV_ASSERT(accessCounterIndex == 0);

    return NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO;
}

NvU32
uvmGetRegOffsetAccessCntrBufferSize_TU102
(
    OBJUVM *pUvm,
    NvU32 accessCounterIndex
)
{
    NV_ASSERT(accessCounterIndex == 0);

    return NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_SIZE;
}

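//
// Reads the PUT pointer of the access counter notification buffer; PUT marks
// the offset at which the next notification entry will be written.
//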
NV_STATUS
uvmReadAccessCntrBufferPutPtr_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex,
    NvU32 *pPutOffset
)
{
    *pPutOffset = GPU_VREG_RD32(pGpu, uvmGetRegOffsetAccessCntrBufferPut_HAL(pUvm, accessCounterIndex));

    return NV_OK;
}

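//
// Reads the GET pointer of the access counter notification buffer; GET marks
// the offset up to which entries have already been consumed.
//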
NV_STATUS
uvmReadAccessCntrBufferGetPtr_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex,
    NvU32 *pGetOffset
)
{
    *pGetOffset = GPU_VREG_RD32(pGpu, uvmGetRegOffsetAccessCntrBufferGet_HAL(pUvm, accessCounterIndex));

    return NV_OK;
}

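//
// Writes the GET pointer of the access counter notification buffer; the
// consumer advances GET past the entries it has processed.
//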
NV_STATUS
uvmWriteAccessCntrBufferGetPtr_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex,
    NvU32 getPtrValue
)
{
    GPU_VREG_WR32(pGpu, uvmGetRegOffsetAccessCntrBufferGet_HAL(pUvm, accessCounterIndex), getPtrValue);

    return NV_OK;
}

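//
// Enables the access counter notification buffer. If RM does not own the
// shared interrupt, the interrupt is disabled to avoid racing with the owner;
// otherwise it is enabled, unless we are on the error recovery path.
//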
NV_STATUS
uvmEnableAccessCntr_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex,
    NvBool bIsErrorRecovery
)
{
    KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu);
    NvBool bRmOwnsAccessCntr = !!(pKernelGmmu->uvmSharedIntrRmOwnsMask &
                                  RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY);
    //
    // Do not touch interrupts if we are in the error recovery path.
    // Also disable interrupts if RM does not own the interrupt, to prevent a
    // race; see bug 2094809 for more details.
    //
    if (!bRmOwnsAccessCntr)
    {
        uvmDisableAccessCntrIntr_HAL(pGpu, pUvm);
    }
    else
    {
        if (!bIsErrorRecovery)
            uvmEnableAccessCntrIntr_HAL(pGpu, pUvm, intr_all);
    }

    uvmProgramAccessCntrBufferEnabled_HAL(pGpu, pUvm, accessCounterIndex, NV_TRUE);

    return NV_OK;
}

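//
// Returns BAR0 mappings of the registers a client needs to drive the access
// counter buffer: GET/PUT, the INFO register (FULL bit), the interrupt leaf
// and leaf-enable registers, and the bitmask for the access counter vector.
//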
NV_STATUS
uvmGetAccessCntrRegisterMappings_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex,
    NvP64 *pAccessCntrBufferGet,
    NvP64 *pAccessCntrBufferPut,
    NvP64 *pAccessCntrBufferFull,
    NvP64 *pHubIntr,
    NvP64 *pHubIntrEnSet,
    NvP64 *pHubIntrEnClear,
    NvU32 *pAccessCntrMask
)
{
    Intr *pIntr = GPU_GET_INTR(pGpu);
    DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0);
    NvP64 bar0Mapping = NV_PTR_TO_NvP64(pMapping->gpuNvAddr);
    NvU32 intrVector = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_ACCESS_CNTR, NV_FALSE);
    NvU32 leafReg, leafBit;

    leafReg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector);
    leafBit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector);

    *pAccessCntrBufferGet = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, uvmGetRegOffsetAccessCntrBufferGet_HAL(pUvm, accessCounterIndex)));
    *pAccessCntrBufferPut = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, uvmGetRegOffsetAccessCntrBufferPut_HAL(pUvm, accessCounterIndex)));
    *pAccessCntrBufferFull = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, uvmGetRegOffsetAccessCntrBufferInfo_HAL(pUvm, accessCounterIndex)));
    *pHubIntr = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(leafReg)));
    *pHubIntrEnSet = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET(leafReg)));
    *pHubIntrEnClear = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR(leafReg)));
    *pAccessCntrMask = NVBIT(leafBit);

    return NV_OK;
}

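//
// Reports whether the notification buffer is full, i.e. the INFO register's
// FULL field reads _TRUE.
//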
NV_STATUS
uvmReadAccessCntrBufferFullPtr_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex,
    NvBool *pFullFlag
)
{
    NvU32 info = GPU_VREG_RD32(pGpu, uvmGetRegOffsetAccessCntrBufferInfo_HAL(pUvm, accessCounterIndex));

    *pFullFlag = FLD_TEST_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO, _FULL, _TRUE, info);

    return NV_OK;
}

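//
// Programs the notification threshold via a read-modify-write of the CONFIG
// register; the threshold is the access count at which a counter reports a
// notification.
//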
NV_STATUS
uvmAccessCntrSetThreshold_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex,
    NvU32 threshold
)
{
    NvU32 regOffsetConfig = uvmGetRegOffsetAccessCntrBufferConfig_HAL(pUvm, accessCounterIndex);
    NvU32 config = GPU_VREG_RD32(pGpu, regOffsetConfig);

    config = FLD_SET_DRF_NUM(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_CONFIG, _THRESHOLD, threshold, config);
    GPU_VREG_WR32(pGpu, regOffsetConfig, config);

    return NV_OK;
}

//
// Returns the notification buffer size in bytes: the entry count from the
// SIZE register times the size of one entry.
// Note: This function returns zero for chips which do not support the access counter.
//
NvU32
uvmGetAccessCounterBufferSize_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex
)
{
    return GPU_VREG_RD32(pGpu, uvmGetRegOffsetAccessCntrBufferSize_HAL(pUvm, accessCounterIndex)) *
           NV_ACCESS_COUNTER_NOTIFY_BUF_SIZE;
}

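//
// Programs the tracking granularity (64K, 2M, 16M or 16G) for the given
// counter type (MIMC or MOMC); an unrecognized type or granularity returns
// NV_ERR_INVALID_ARGUMENT.
//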
NV_STATUS
uvmAccessCntrSetGranularity_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex,
    ACCESS_CNTR_TYPE accessCntType,
    NvU32 granularity
)
{
    NvU32 regOffsetConfig = uvmGetRegOffsetAccessCntrBufferConfig_HAL(pUvm, accessCounterIndex);
    NvU32 config = GPU_VREG_RD32(pGpu, regOffsetConfig);

    if (accessCntType == MIMC)
    {
        switch (granularity)
        {
            case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_64K:
                config = FLD_SET_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_CONFIG, _MIMC_GRANULARITY, _64K, config);
                break;
            case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_2M:
                config = FLD_SET_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_CONFIG, _MIMC_GRANULARITY, _2M, config);
                break;
            case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16M:
                config = FLD_SET_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_CONFIG, _MIMC_GRANULARITY, _16M, config);
                break;
            case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16G:
                config = FLD_SET_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_CONFIG, _MIMC_GRANULARITY, _16G, config);
                break;
            default:
                return NV_ERR_INVALID_ARGUMENT;
        }
    }
    else if (accessCntType == MOMC)
    {
        switch (granularity)
        {
            case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_64K:
                config = FLD_SET_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_CONFIG, _MOMC_GRANULARITY, _64K, config);
                break;
            case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_2M:
                config = FLD_SET_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_CONFIG, _MOMC_GRANULARITY, _2M, config);
                break;
            case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16M:
                config = FLD_SET_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_CONFIG, _MOMC_GRANULARITY, _16M, config);
                break;
            case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16G:
                config = FLD_SET_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_CONFIG, _MOMC_GRANULARITY, _16G, config);
                break;
            default:
                return NV_ERR_INVALID_ARGUMENT;
        }
    }
    else
        return NV_ERR_INVALID_ARGUMENT;

    GPU_VREG_WR32(pGpu, regOffsetConfig, config);

    return NV_OK;
}

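//
// Programs the 64-bit notification buffer address, split across the HI and
// LO address registers.
//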
void
uvmProgramWriteAccessCntrBufferAddress_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex,
    NvU64 addr
)
{
    GPU_VREG_WR32(pGpu, uvmGetRegOffsetAccessCntrBufferHi_HAL(pUvm, accessCounterIndex), NvU64_HI32(addr));
    GPU_VREG_WR32(pGpu, uvmGetRegOffsetAccessCntrBufferLo_HAL(pUvm, accessCounterIndex), NvU64_LO32(addr));
}

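//
// Sets or clears the buffer enable bit via a read-modify-write of the _EN
// field in the LO register.
//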
void
uvmProgramAccessCntrBufferEnabled_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex,
    NvBool bEn
)
{
    NvU32 regOffsetLo = uvmGetRegOffsetAccessCntrBufferLo_HAL(pUvm, accessCounterIndex);
    NvU32 lo = GPU_VREG_RD32(pGpu, regOffsetLo);

    lo = FLD_SET_DRF_NUM(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_LO, _EN, bEn, lo);
    GPU_VREG_WR32(pGpu, regOffsetLo, lo);
}

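//
// Returns NV_TRUE if the notification buffer's _EN bit is set.
//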
NvBool
uvmIsAccessCntrBufferEnabled_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex
)
{
    NvU32 lo = GPU_VREG_RD32(pGpu, uvmGetRegOffsetAccessCntrBufferLo_HAL(pUvm, accessCounterIndex));

    return FLD_TEST_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_LO, _EN, _TRUE, lo);
}

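//
// Returns NV_TRUE if the INFO register's _PUSHED field reads _TRUE.
//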
NvBool
uvmIsAccessCntrBufferPushed_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 accessCounterIndex
)
{
    NvU32 info = GPU_VREG_RD32(pGpu, uvmGetRegOffsetAccessCntrBufferInfo_HAL(pUvm, accessCounterIndex));

    return FLD_TEST_DRF(_VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO, _PUSHED, _TRUE, info);
}

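//
// Enables the access counter interrupt at its interrupt leaf; a no-op for
// interrupt types other than intr_all and intr_notify.
//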
NV_STATUS
uvmEnableAccessCntrIntr_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm,
    NvU32 intrType
)
{
    Intr *pIntr = GPU_GET_INTR(pGpu);

    if (intrType == intr_all || intrType == intr_notify)
    {
        intrEnableLeaf_HAL(pGpu, pIntr,
            intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_ACCESS_CNTR, NV_FALSE));
    }

    return NV_OK;
}

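//
// Disables the access counter interrupt at its interrupt leaf.
//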
NV_STATUS
uvmDisableAccessCntrIntr_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm
)
{
    Intr *pIntr = GPU_GET_INTR(pGpu);

    intrDisableLeaf_HAL(pGpu, pIntr,
        intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_ACCESS_CNTR, NV_FALSE));

    return NV_OK;
}

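//
// Services a pending access counter interrupt by notifying the client
// registered on each access counter buffer; with multiple buffers, only
// non-empty ones (GET != PUT) are notified.
//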
NV_STATUS
uvmAccessCntrService_TU102
(
    OBJGPU *pGpu,
    OBJUVM *pUvm
)
{
    NvU32 i;

    for (i = 0; i < pUvm->accessCounterBufferCount; i++)
    {
        AccessCounterBuffer *pAccessCounterBuffer = pUvm->pAccessCounterBuffers[i].pAccessCounterBuffer;
        EVENTNOTIFICATION **ppEventNotification;

        if (pAccessCounterBuffer == NULL)
            continue;

        ppEventNotification = inotifyGetNotificationListPtr(staticCast(pAccessCounterBuffer, INotifier));

        if (ppEventNotification == NULL)
            continue;

        if (pUvm->accessCounterBufferCount > 1)
        {
            NvU32 get, put;

            NV_ASSERT_OK_OR_RETURN(uvmReadAccessCntrBufferGetPtr(pGpu, pUvm, pAccessCounterBuffer->accessCounterIndex, &get));
            NV_ASSERT_OK_OR_RETURN(uvmReadAccessCntrBufferPutPtr(pGpu, pUvm, pAccessCounterBuffer->accessCounterIndex, &put));

            //
            // We can't know which access counter buffer received the new
            // entries, so we send a notification for each non-empty buffer.
            // This can generate spurious notifications when interrupts for
            // different counters arrive in rapid succession and the client
            // has not yet updated its GET pointer.
            //
            // We could remember the last known PUT pointer and compare
            // against it, but in theory the buffer can wrap around while the
            // client is still handling the interrupt.
            //
            if (get == put)
                continue;
        }

        NV_ASSERT_OK(notifyEvents(pGpu, *ppEventNotification, NVC365_NOTIFIERS_ACCESS_COUNTER,
                                  0, 0, NV_OK, NV_OS_WRITE_THEN_AWAKEN));
    }

    return NV_OK;
}