1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 /* This file contains push buffer utility functions and declarations */
25 
26 #ifndef __NVIDIA_PUSH_UTILS_H__
27 #define __NVIDIA_PUSH_UTILS_H__
28 
29 #include "nvidia-push-types.h"
30 #include "nvlimits.h"
31 
32 #include "class/cla16f.h"
33 
34 #ifdef __cplusplus
35 extern "C" {
36 #endif
37 
nvPushIsAModel(const NvPushDeviceRec * pDevice)38 static inline NvBool nvPushIsAModel(const NvPushDeviceRec *pDevice)
39 {
40     return FALSE;
41 }
42 
43 
44 /* declare prototypes: */
45 NvBool nvPushCheckChannelError(NvPushChannelPtr pChannel);
46 void nvPushKickoff(NvPushChannelPtr);
47 NvBool nvPushIdleChannelTest(NvPushChannelPtr pChannel, NvU32 timeoutMSec);
48 NvBool nvPushIdleChannel(NvPushChannelPtr);
49 
50 void nvPushWaitForNotifier(
51     NvPushChannelPtr pChannel,
52     NvU32 notifierIndex,
53     NvU32 subdeviceMask,
54     NvBool yield,
55     NvPushImportEvent *pEvent,
56     int id);
57 
58 void nvPushReleaseTimelineSemaphore(
59     NvPushChannelPtr p,
60     void *cpuAddress,
61     NvU64 gpuAddress,
62     NvU64 val);
63 
64 void nvPushAcquireTimelineSemaphore(
65     NvPushChannelPtr p,
66     NvU64 gpuAddress,
67     NvU64 val);
68 
69 NvBool nvPushDecodeMethod(NvU32 header, NvU32 *count);
70 void nvPushSetObject(NvPushChannelPtr p, NvU32 subch, NvU32 *object);
71 void nvPushSetSubdeviceMask(NvPushChannelPtr p, NvU32 mask);
72 void __nvPushMakeRoom(NvPushChannelPtr, NvU32 count);
73 
74 #define NV_PUSH_SUBDEVICE_MASK_PRIMARY 0x00000001
75 #define NV_PUSH_SUBDEVICE_MASK_ALL DRF_MASK(NVA16F_DMA_SET_SUBDEVICE_MASK_VALUE)
76 
77 /*
78  * Evaluates to TRUE if the two subDevMasks are equivalent for the given SLI
79  * device
80  */
nvPushSubDeviceMaskEquiv(const NvPushDeviceRec * pDevice,NvU32 maskA,NvU32 maskB)81 static inline NvBool nvPushSubDeviceMaskEquiv(
82     const NvPushDeviceRec *pDevice,
83     NvU32 maskA,
84     NvU32 maskB)
85 {
86     const NvU32 allSubDevices = (1 << pDevice->numSubDevices) - 1;
87 
88     return (maskA & allSubDevices) == (maskB & allSubDevices);
89 }
90 
91 /* Evaluates to TRUE if subDevMask will write to all of the GPUs */
nvPushSubDeviceMaskAllActive(const NvPushDeviceRec * pDevice,NvU32 subDevMask)92 static inline NvBool nvPushSubDeviceMaskAllActive(
93     const NvPushDeviceRec *pDevice,
94     NvU32 subDevMask)
95 {
96     return nvPushSubDeviceMaskEquiv(pDevice, subDevMask,
97                                     NV_PUSH_SUBDEVICE_MASK_ALL);
98 }
99 
100 #define NV_PUSH_NOTIFIER_INTERNAL_BIT 0x80
101 ct_assert(NV_PUSH_NOTIFIER_INTERNAL_BIT >=
102           NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1);
103 #define NV_PUSH_ERROR_NOTIFIER_INDEX \
104             (NV_PUSH_NOTIFIER_INTERNAL_BIT | \
105              NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR)
106 #define NV_PUSH_TOKEN_NOTIFIER_INDEX \
107             (NV_PUSH_NOTIFIER_INTERNAL_BIT | \
108              NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN)
109 
110 /*
111  * Notifiers for use by nvidia-push, not exposed to clients:
112  * NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1: defined by RM
113  * NV_MAX_SUBDEVICES: one for each subdevice to track work submission token
114  */
115 #define NV_PUSH_NUM_INTERNAL_NOTIFIERS \
116     (NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1 + NV_MAX_SUBDEVICES)
117 
/*
 * Map a (notifierIndex, subdevice) pair to a raw slot in the notifier
 * surface.  Internal notifiers (flagged with NV_PUSH_NOTIFIER_INTERNAL_BIT)
 * encode their raw slot directly; client notifiers are packed per-subdevice
 * after the internal block.
 */
static inline NvU32 __nvPushGetNotifierRawIndex(
    const NvPushDeviceRec *pDevice,
    NvU32 notifierIndex,
    NvU32 sd)
{
    /* Internal notifier: strip the flag bit to recover the raw index. */
    if ((notifierIndex & NV_PUSH_NOTIFIER_INTERNAL_BIT) != 0) {
        return notifierIndex & ~((NvU32)NV_PUSH_NOTIFIER_INTERNAL_BIT);
    }

    /* Client notifier: one slot per subdevice, after the internal slots. */
    return NV_PUSH_NUM_INTERNAL_NOTIFIERS +
           (notifierIndex * pDevice->numSubDevices) + sd;
}
130 
nvPushGetNotifierCpuAddress(const NvPushChannelRec * pChannel,NvU32 notifierIndex,NvU32 sd)131 static inline NvNotification *nvPushGetNotifierCpuAddress(
132     const NvPushChannelRec *pChannel,
133     NvU32 notifierIndex,
134     NvU32 sd)
135 {
136     const NvU32 rawIndex =
137         __nvPushGetNotifierRawIndex(pChannel->pDevice, notifierIndex, sd);
138 
139     return &pChannel->notifiers.cpuAddress[rawIndex];
140 }
141 
nvPushGetNotifierGpuAddress(const NvPushChannelRec * pChannel,NvU32 notifierIndex,NvU32 sd)142 static inline NvU64 nvPushGetNotifierGpuAddress(
143     const NvPushChannelRec *pChannel,
144     NvU32 notifierIndex,
145     NvU32 sd)
146 {
147     const NvU32 rawIndex =
148         __nvPushGetNotifierRawIndex(pChannel->pDevice, notifierIndex, sd);
149     const size_t offset = rawIndex * sizeof(NvNotification);
150 
151     return pChannel->notifiers.gpuAddress + offset;
152 }
153 
154 
155 extern NvU32 nvPushReadGetOffset(NvPushChannelPtr push_buffer, NvBool minimum);
156 
157 
158 /*!
159  * Make room in the pushbuffer, checking for errors.
160  *
161  * If a channel error occurred, channelErrorOccurred is set to TRUE.
162  * nvPushCheckForRoomAndErrors() is designed to be called just before a
163  * nvPushMethod() with the same size.
164  */
static inline void nvPushCheckForRoomAndErrors(
    NvPushChannelPtr pChannel,
    NvU32 count)
{
    /*
     * count payload dwords plus one extra — presumably the method header
     * dword written by the subsequent nvPushMethod(); confirm against
     * nvPushMethod's accounting.
     */
    const NvU32 needed = count + 1;

    /* Clear the sticky error flag; __nvPushMakeRoom may set it. */
    pChannel->channelErrorOccurred = FALSE;

    if (pChannel->main.freeDwords < needed) {
        __nvPushMakeRoom(pChannel, needed);
    }
}
175 
176 #ifdef __cplusplus
177 };
178 #endif
179 
180 #endif /* __NVIDIA_PUSH_UTILS_H__ */
181