1 #ifndef _G_GPU_ACCESS_NVOC_H_
2 #define _G_GPU_ACCESS_NVOC_H_
3 #include "nvoc/runtime.h"
4 
5 #ifdef __cplusplus
6 extern "C" {
7 #endif
8 
9 /*
10  * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
11  * SPDX-License-Identifier: MIT
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a
14  * copy of this software and associated documentation files (the "Software"),
15  * to deal in the Software without restriction, including without limitation
16  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17  * and/or sell copies of the Software, and to permit persons to whom the
18  * Software is furnished to do so, subject to the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29  * DEALINGS IN THE SOFTWARE.
30  */
31 #include "g_gpu_access_nvoc.h"
32 
33 #ifndef _GPU_ACCESS_H_
34 #define _GPU_ACCESS_H_
35 
36 #include "core/core.h"
37 #include "ioaccess/ioaccess.h"
38 #include "gpu/gpu_device_mapping.h"
39 
// Go straight at the memory or hardware.
// Raw width-exact accessors: volatile forces a real load/store of the stated
// size, with no register filters, profiling, or error checking applied.
// NOTE(review): MEM_WR64 has no MEM_RD64 counterpart here — confirm whether
// 64-bit reads are intentionally unsupported on this path.
#define MEM_RD08(a) (*(const volatile NvU8  *)(a))
#define MEM_RD16(a) (*(const volatile NvU16 *)(a))
#define MEM_RD32(a) (*(const volatile NvU32 *)(a))
#define MEM_WR08(a, d) do { *(volatile NvU8  *)(a) = (d); } while (0)
#define MEM_WR16(a, d) do { *(volatile NvU16 *)(a) = (d); } while (0)
#define MEM_WR32(a, d) do { *(volatile NvU32 *)(a) = (d); } while (0)
#define MEM_WR64(a, d) do { *(volatile NvU64 *)(a) = (d); } while (0)
48 
49 //
50 // Define the signature of the register filter callback function
51 //
52 // flags can be optionally used for filters to decide whether to actually
53 // touch HW or not. flags should be OR'ed every time a new filter is found. (see objgpu.c)
54 //
55 typedef void (*GpuWriteRegCallback)(OBJGPU *, void *, NvU32 addr, NvU32 val, NvU32 accessSize, NvU32 flags);
56 typedef NvU32 (*GpuReadRegCallback)(OBJGPU *, void *, NvU32 addr, NvU32 accessSize, NvU32 flags);
57 
58 union GPUHWREG
59 {
60     volatile NvV8 Reg008[1];
61     volatile NvV16 Reg016[1];
62     volatile NvV32 Reg032[1];
63 };
64 
65 typedef union  GPUHWREG  GPUHWREG;
66 
67 //
68 // Register filter record
69 //
70 // If REGISTER_FILTER_FLAGS_READ is set, then that means that the base RegRead
71 // function will not read the register, so the provided read callback function
72 // is expected to read the register and return the value.
73 //
74 // If REGISTER_FILTER_FLAGS_WRITE is set, then that means that the base RegWrite
75 // function will not write the register, so the provided callback write function
76 // is expected to write the given value to the register.
77 //
78 // It is an error to specify REGISTER_FILTER_FLAGS_READ and not provide a
79 // read callback function.
80 //
81 // It is an error to specify REGISTER_FILTER_FLAGS_WRITE and not provide a
82 // write callback function.
83 //
#define REGISTER_FILTER_FLAGS_READ         (NVBIT(0))  // filter replaces the base read (read callback required)
#define REGISTER_FILTER_FLAGS_WRITE        (NVBIT(1))  // filter replaces the base write (write callback required)
// filter is in the list but it is invalid and should be removed
#define REGISTER_FILTER_FLAGS_INVALID      (NVBIT(2))

// Neither READ nor WRITE set: callbacks observe the access, base I/O still happens.
#define REGISTER_FILTER_FLAGS_VIRTUAL      (0)
#define REGISTER_FILTER_FLAGS_READ_WRITE   (REGISTER_FILTER_FLAGS_READ | REGISTER_FILTER_FLAGS_WRITE)

// Do not warn if attempting to add a filter on GSP [CORERM-5356]
#define REGISTER_FILTER_FLAGS_NO_GSP_WARNING (NVBIT(3))
94 
typedef struct REGISTER_FILTER REGISTER_FILTER;

//! Node in the singly-linked list of register filters kept in DEVICE_REGFILTER_INFO.
struct REGISTER_FILTER
{
    REGISTER_FILTER            *pNext;           //!< pointer to next filter
    NvU32                       flags;           //!< attributes of this filter (REGISTER_FILTER_FLAGS_*)
    DEVICE_INDEX                devIndex;        //!< filter device
    NvU32                       devInstance;     //!< filter device instance
    NvU32                       rangeStart;      //!< filter range start (can overlap)
    NvU32                       rangeEnd;        //!< filter range end   (can overlap)
    GpuWriteRegCallback         pWriteCallback;  //!< callback for write
    GpuReadRegCallback          pReadCallback;   //!< callback for read
    void                       *pParam;          //!< pointer to param which gets passed to callbacks
};
109 
//! Per-device-mapping register filter list state; list access is guarded by pRegFilterLock.
typedef struct {
    REGISTER_FILTER        *pRegFilterList;         // Active filters
    REGISTER_FILTER        *pRegFilterRecycleList;  // Inactive filters
    PORT_SPINLOCK *         pRegFilterLock;         // Thread-safe list management
    NvU32                   regFilterRefCnt;        // Thread-safe list management
    NvBool                  bRegFilterNeedRemove;   // Thread-safe list garbage collection
} DEVICE_REGFILTER_INFO;
117 
//! One CPU mapping of a device register aperture, plus its filter state.
typedef struct DEVICE_MAPPING
{
    GPUHWREG             *gpuNvAddr;        // CPU Virtual Address
    RmPhysAddr            gpuNvPAddr;       // Physical Base Address
    NvU32                 gpuNvLength;      // Length of the Aperture
    NvU32                 gpuNvSaveLength;  // NOTE(review): presumably the saved/original length — confirm against users
    NvU32                 gpuDeviceEnum;    // Device ID NV_DEVID_*
    NvU32                 refCount;         // refCount for the device map.
    DEVICE_REGFILTER_INFO devRegFilterInfo; // register filter range list
} DEVICE_MAPPING;
128 
//! Per-GPU register I/O object: cached aperture pointers plus access counters.
typedef struct
{
    // Pointer to GPU linked to this RegisterAccess object
    OBJGPU       *pGpu;

    // HW register access tools
    GPUHWREG     *gpuFbAddr;
    GPUHWREG     *gpuInstAddr;

    // Register access profiling
    NvU32         regReadCount;
    NvU32         regWriteCount;
} RegisterAccess;
142 
/*! Init register IO access path */
NV_STATUS regAccessConstruct(RegisterAccess *, OBJGPU *pGpu);

/*! Shutdown register IO access path */
void regAccessDestruct(RegisterAccess *);

//
// For the reg{Read,Write}NNN accessors below, the unnamed parameters are, in
// order: device index, device instance, register offset, then (for writes)
// the value — see the REG_INST_* wrapper macros further down which supply
// them.  A trailing THREAD_STATE_NODE* may be NULL.
//

/*! Writes to 8 bit register */
void regWrite008(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV8);

/*! Writes to 16 bit register */
void regWrite016(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV16);

/*! Writes to 32 bit register, with thread state on the stack */
void regWrite032(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV32, THREAD_STATE_NODE *);

/*! Unicast register access, with thread state on the stack */
void regWrite032Unicast(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV32, THREAD_STATE_NODE *);

/*! Reads from 8 bit register */
NvU8 regRead008(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32);

/*! Reads from 16 bit register */
NvU16 regRead016(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32);

/*! Reads from 32 bit register, with thread state on the stack */
NvU32 regRead032(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, THREAD_STATE_NODE *);

/*! Reads from 32 bit register and checks bit mask, with thread state on the stack */
NvU32 regCheckRead032(RegisterAccess *, NvU32, NvU32, THREAD_STATE_NODE *);

/*! Reads 32 bit register and polls bit field for specific value */
NV_STATUS regRead032_AndPoll(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvU32);

/*! Adds a register filter */
NV_STATUS regAddRegisterFilter(RegisterAccess *, NvU32, DEVICE_INDEX, NvU32, NvU32, NvU32, GpuWriteRegCallback, GpuReadRegCallback, void *, REGISTER_FILTER **);

/*! Removes register filter */
void regRemoveRegisterFilter(RegisterAccess *, REGISTER_FILTER *);

/*! Check status of read return value for GPU/bus errors */
void regCheckAndLogReadFailure(RegisterAccess *, NvU32 addr, NvU32 mask, NvU32 value);
184 
185 //
186 // GPU register I/O macros.
187 //
188 
189 //
190 // GPU neutral macros typically used for register I/O.
191 //
192 #define GPU_DRF_SHIFT(drf)          ((0?drf) % 32)
193 #define GPU_DRF_MASK(drf)           (0xFFFFFFFF>>(31-((1?drf) % 32)+((0?drf) % 32)))
194 #define GPU_DRF_DEF(d,r,f,c)        ((NV ## d ## r ## f ## c)<<GPU_DRF_SHIFT(NV ## d ## r ## f))
195 #define GPU_DRF_NUM(d,r,f,n)        (((n)&GPU_DRF_MASK(NV ## d ## r ## f))<<GPU_DRF_SHIFT(NV ## d ## r ## f))
196 #define GPU_DRF_VAL(d,r,f,v)        (((v)>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
197 #define GPU_DRF_SHIFTMASK(drf)      (GPU_DRF_MASK(drf)<<(GPU_DRF_SHIFT(drf)))
198 #define GPU_DRF_WIDTH(drf)          ((1?drf) - (0?drf) + 1)
199 
200 
// Device independent macros
// Multiple device instance macros
//
// (g = OBJGPU*, dev = DEVICE_INDEX_* suffix, inst = device instance,
//  a = register offset, v = value.)  Plain forms pass NULL thread state;
// the _EX variants take an explicit THREAD_STATE_NODE *t.

#define REG_INST_RD08(g,dev,inst,a)             regRead008(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a)
#define REG_INST_RD16(g,dev,inst,a)             regRead016(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a)
#define REG_INST_RD32(g,dev,inst,a)             regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, NULL)

#define REG_INST_WR08(g,dev,inst,a,v)     regWrite008(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v)
#define REG_INST_WR16(g,dev,inst,a,v)     regWrite016(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v)
#define REG_INST_WR32(g,dev,inst,a,v)     regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, NULL)
#define REG_INST_WR32_UC(g,dev,inst,a,v)  regWrite032Unicast(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, NULL)

#define REG_INST_RD32_EX(g,dev,inst,a,t)    regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, t)
#define REG_INST_WR32_EX(g,dev,inst,a,v,t)  regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, t)

// Same as above but taking a runtime DEVICE_INDEX value instead of a token suffix.
#define REG_INST_DEVIDX_RD32_EX(g,devidx,inst,a,t)    regRead032(GPU_GET_REGISTER_ACCESS(g), devidx, inst, a, t)
#define REG_INST_DEVIDX_WR32_EX(g,devidx,inst,a,v,t)  regWrite032(GPU_GET_REGISTER_ACCESS(g), devidx, inst, a, v, t)

// Get the address of a register given the Aperture and offset.
#define REG_GET_ADDR(ap, offset) ioaprtGetRegAddr(ap, offset)
221 
// GPU macros defined in terms of DEV_ macros
#define GPU_REG_RD08(g,a) REG_INST_RD08(g,GPU,0,a)
#define GPU_REG_RD16(g,a) REG_INST_RD16(g,GPU,0,a)
#define GPU_REG_RD32(g,a) REG_INST_RD32(g,GPU,0,a)
#define GPU_CHECK_REG_RD32(g,a,m) regCheckRead032(GPU_GET_REGISTER_ACCESS(g),a,m,NULL)
#define GPU_REG_RD32_AND_POLL(g,r,m,v) regRead032_AndPoll(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_GPU, r, m, v)

#define GPU_REG_WR08(g,a,v)    REG_INST_WR08(g,GPU,0,a,v)
#define GPU_REG_WR16(g,a,v)    REG_INST_WR16(g,GPU,0,a,v)
#define GPU_REG_WR32(g,a,v)    REG_INST_WR32(g,GPU,0,a,v)
#define GPU_REG_WR32_UC(g,a,v) REG_INST_WR32_UC(g,GPU,0,a,v)

// GPU macros for SR-IOV
// Virtual-function registers are reached by biasing the offset with
// g->sriovState.virtualRegPhysOffset.
#define GPU_VREG_RD32(g, a)                GPU_REG_RD32(g, g->sriovState.virtualRegPhysOffset + a)
#define GPU_VREG_WR32(g, a, v)             GPU_REG_WR32(g, g->sriovState.virtualRegPhysOffset + a, v)
#define GPU_VREG_RD32_EX(g,a,t)            REG_INST_RD32_EX(g, GPU, 0, g->sriovState.virtualRegPhysOffset + a, t)
#define GPU_VREG_WR32_EX(g,a,v,t)          REG_INST_WR32_EX(g, GPU, 0, g->sriovState.virtualRegPhysOffset + a, v, t)
#define GPU_VREG_FLD_WR_DRF_DEF(g,d,r,f,c) GPU_VREG_WR32(g, NV##d##r,(GPU_VREG_RD32(g,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
#define GPU_VREG_RD_DRF(g,d,r,f)        (((GPU_VREG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))

#define VREG_INST_RD32(g,dev,inst,a)            regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, g->sriovState.virtualRegPhysOffset + a, NULL)
#define VREG_INST_WR32(g,dev,inst,a,v)          regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, g->sriovState.virtualRegPhysOffset + a, v, NULL)
#define GPU_VREG_FLD_WR_DRF_NUM(g,d,r,f,n) VREG_INST_WR32(g,GPU,0,NV##d##r,(VREG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))

#define GPU_VREG_FLD_TEST_DRF_DEF(g,d,r,f,c) (GPU_VREG_RD_DRF(g, d, r, f) == NV##d##r##f##c)

#define GPU_GET_VREG_OFFSET(g, a) (g->sriovState.virtualRegPhysOffset + a)

#define GPU_VREG_IDX_RD_DRF(g,d,r,i,f)           (((GPU_VREG_RD32(g, NV ## d ## r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
#define GPU_VREG_FLD_IDX_WR_DRF_DEF(g,d,r,i,f,c) GPU_VREG_WR32(g, NV##d##r(i),(GPU_VREG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
#define GPU_VREG_FLD_IDX_WR_DRF_NUM(g,d,r,i,f,n) GPU_VREG_WR32(g, NV##d##r(i),(GPU_VREG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))


#define GPU_REG_RD32_EX(g,a,t)    REG_INST_RD32_EX(g,GPU,0,a,t)
#define GPU_REG_WR32_EX(g,a,v,t)  REG_INST_WR32_EX(g,GPU,0,a,v,t)
257 
// Uncomment this to enable register access dump in gsp client
// #define GPU_REGISTER_ACCESS_DUMP    RMCFG_FEATURE_GSP_CLIENT_RM
#ifndef GPU_REGISTER_ACCESS_DUMP
#define GPU_REGISTER_ACCESS_DUMP    0
#endif

//
// When dumping is enabled, the GPU_REG_* / GPU_VREG_* macros above are
// redefined to *_dumpinfo wrappers that also record the calling function,
// the stringized register argument, and a "(VREG)" tag for virtual accesses.
//
#if GPU_REGISTER_ACCESS_DUMP

NvU8  gpuRegRd08_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr);
NvU16 gpuRegRd16_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr);
NvU32 gpuRegRd32_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr);
void  gpuRegWr08_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV8 val);
void  gpuRegWr16_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV16 val);
void  gpuRegWr32_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV32 val);
void  gpuRegWr32Uc_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV32 val);

#undef GPU_REG_RD08
#undef GPU_REG_RD16
#undef GPU_REG_RD32
#undef GPU_REG_WR08
#undef GPU_REG_WR16
#undef GPU_REG_WR32
#undef GPU_REG_WR32_UC
#undef GPU_VREG_RD32
#undef GPU_VREG_WR32

#define GPU_REG_RD08(g,a)       gpuRegRd08_dumpinfo(__FUNCTION__,#a,"",g,a)
#define GPU_REG_RD16(g,a)       gpuRegRd16_dumpinfo(__FUNCTION__,#a,"",g,a)
#define GPU_REG_RD32(g,a)       gpuRegRd32_dumpinfo(__FUNCTION__,#a,"",g,a)
#define GPU_REG_WR08(g,a,v)     gpuRegWr08_dumpinfo(__FUNCTION__,#a,"",g,a,v)
#define GPU_REG_WR16(g,a,v)     gpuRegWr16_dumpinfo(__FUNCTION__,#a,"",g,a,v)
#define GPU_REG_WR32(g,a,v)     gpuRegWr32_dumpinfo(__FUNCTION__,#a,"",g,a,v)
#define GPU_REG_WR32_UC(g,a,v)  gpuRegWr32Uc_dumpinfo(__FUNCTION__,#a,"",g,a,v)
#define GPU_VREG_RD32(g, a)     gpuRegRd32_dumpinfo(__FUNCTION__,#a,"(VREG)",g, g->sriovState.virtualRegPhysOffset + a)
#define GPU_VREG_WR32(g, a, v)  gpuRegWr32_dumpinfo(__FUNCTION__,#a,"(VREG)",g, g->sriovState.virtualRegPhysOffset + a, v)

#endif // GPU_REGISTER_ACCESS_DUMP
295 
296 //
297 // Macros for register I/O
298 //
299 #define GPU_FLD_WR_DRF_NUM(g,d,r,f,n) REG_INST_WR32(g,GPU,0,NV##d##r,(REG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
300 #define GPU_FLD_WR_DRF_NUM_UC(g,d,r,f,n) GPU_REG_WR32_UC(g, NV##d##r,(GPU_REG_RD32(g,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
301 #define GPU_FLD_WR_DRF_DEF(g,d,r,f,c) GPU_REG_WR32(g, NV##d##r,(GPU_REG_RD32(g,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
302 #define GPU_REG_RD_DRF(g,d,r,f)       (((GPU_REG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
303 #define GPU_FLD_TEST_DRF_DEF(g,d,r,f,c) (GPU_REG_RD_DRF(g, d, r, f) == NV##d##r##f##c)
304 #define GPU_FLD_TEST_DRF_NUM(g,d,r,f,n) (GPU_REG_RD_DRF(g, d, r, f) == n)
305 #define GPU_FLD_IDX_TEST_DRF_DEF(g,d,r,f,c,i) (GPU_REG_IDX_RD_DRF(g, d, r, i, f) == NV##d##r##f##c)
306 #define GPU_FLD_2IDX_TEST_DRF_DEF(g,d,r,f,c,i,j) (GPU_REG_2IDX_RD_DRF(g, d, r, i, j, f) == NV##d##r##f##c)
307 
308 #define GPU_REG_RD_DRF_EX(g,d,r,f,t)       (((GPU_REG_RD32_EX(g, NV ## d ## r, t))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
309 
310 #define GPU_FLD_WR_DRF_NUM_EX(g,d,r,f,n,t) REG_INST_WR32_EX(g,GPU,0,NV##d##r,(REG_INST_RD32_EX(g,GPU,0,NV##d##r,t)&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n),t)
311 
312 // Read/write a field or entire register of which there are several copies each accessed via an index
313 #define GPU_REG_IDX_WR_DRF_NUM(g,d,r,i,f,n) GPU_REG_WR32(g, NV ## d ## r(i), GPU_DRF_NUM(d,r,f,n))
314 #define GPU_REG_IDX_WR_DRF_DEF(g,d,r,i,f,c) GPU_REG_WR32(g, NV ## d ## r(i), GPU_DRF_DEF(d,r,f,c))
315 #define GPU_FLD_IDX_WR_DRF_NUM(g,d,r,i,f,n) GPU_REG_WR32(g, NV##d##r(i),(GPU_REG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
316 #define GPU_FLD_IDX_WR_DRF_DEF(g,d,r,i,f,c) GPU_REG_WR32(g, NV##d##r(i),(GPU_REG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
317 #define GPU_REG_IDX_WR_DRF_NUM_UC(g,d,r,i,f,n) GPU_REG_WR32_UC(g, NV ## d ## r(i), GPU_DRF_NUM(d,r,f,n))
318 #define GPU_REG_IDX_WR_DRF_DEF_UC(g,d,r,i,f,c) GPU_REG_WR32_UC(g, NV ## d ## r(i), GPU_DRF_DEF(d,r,f,c))
319 #define GPU_FLD_IDX_WR_DRF_DEF_UC(g,d,r,i,f,c) GPU_REG_WR32_UC(g, NV##d##r(i),(GPU_REG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
320 #define GPU_REG_IDX_RD_DRF(g,d,r,i,f)              (((GPU_REG_RD32(g, NV ## d ## r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
321 #define GPU_REG_2IDX_RD_DRF(g,d,r,i,j,f)             (((GPU_REG_RD32(g, NV ## d ## r(i, j)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
322 #define GPU_REG_RD_DRF_IDX(g,d,r,f,i)              (((GPU_REG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f(i)))&GPU_DRF_MASK(NV ## d ## r ## f(i)))
323 #define GPU_REG_IDX_OFFSET_RD_DRF(g,d,r,i,o,f)     (((GPU_REG_RD32(g, NV ## d ## r(i,o)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
324 
325 //
326 // Macros that abstract the use of bif object to access GPU bus config registers
327 // This is the preferred set >= NV50
328 //
329 #define GPU_BUS_CFG_RD32(g,r,d)             gpuReadBusConfigReg_HAL(g, r, d)
330 #define GPU_BUS_CFG_WR32(g,r,d)             gpuWriteBusConfigReg_HAL(g, r, d)
331 #define GPU_BUS_CFG_FLD_WR_DRF_DEF(g,x,d,r,f,c) GPU_BUS_CFG_WR32(g, NV##d##r,(x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
332 #define GPU_BUS_CFG_FLD_WR_DRF_NUM(g,x,d,r,f,n) GPU_BUS_CFG_WR32(g, NV##d##r,(x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
333 
334 #define GPU_BUS_CFG_RD32_EX(g,r,d,t)        gpuReadBusConfigRegEx_HAL(g, r, d, t)
335 
336 //
337 // Macros that provide access to the config space of functions other than the gpu
338 //
339 #define PCI_FUNCTION_BUS_CFG_RD32(g,f,r,d)            gpuReadFunctionConfigReg_HAL(g, f, r, d)
340 #define PCI_FUNCTION_BUS_CFG_WR32(g,f,r,d)            gpuWriteFunctionConfigReg_HAL(g, f, r, d)
341 #define PCI_FUNCTION_BUS_CFG_FLD_WR_DRF_NUM(g,fn,x,d,r,f,n) gpuWriteFunctionConfigReg_HAL(g, fn, NV##d##r, (x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
342 #define PCI_FUNCTION_BUS_CFG_FLD_WR_DRF_DEF(g,fn,x,d,r,f,c) gpuWriteFunctionConfigReg_HAL(g, fn, NV##d##r, (x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
343 
344 #define PCI_FUNCTION_BUS_CFG_WR32_EX(g,f,r,d,t)       gpuWriteFunctionConfigRegEx_HAL(g, f, r, d, t)
345 
346 #define GPU_BUS_CFG_CYCLE_RD32(g,r,d)                  gpuReadBusConfigCycle(g, r, d)
347 #define GPU_BUS_CFG_CYCLE_WR32(g,r,d)                  gpuWriteBusConfigCycle(g, r, d)
348 #define GPU_BUS_CFG_CYCLE_FLD_WR_DRF_DEF(g,x,d,r,f,c)  gpuWriteBusConfigCycle(g, NV##d##r,(x &~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
349 
350 //
351 // Instance memory structure access definitions.
352 //
353 // DRF macros (nvmisc.h) should be used when possible instead of these
354 // definitions.
355 //
356 // Key difference is SF variants take structure ## field (2-level nested
357 // namespace), DRF take device ## register ## field (3-level nested
358 // namespace).
359 //
360 // SF variants are primarily used for GPU host memory structures. DRF
361 // should be used for manipulation of most registers
362 //
363 #define SF_INDEX(sf)            ((0?sf)/32)
364 #define SF_OFFSET(sf)           (((0?sf)/32)<<2)
365 #define SF_SHIFT(sf)            ((0?sf)&31)
366 #undef  SF_MASK
367 #define SF_MASK(sf)             (0xFFFFFFFF>>(31-(1?sf)+(0?sf)))
368 #define SF_SHIFTMASK(sf)        (SF_MASK(sf) << SF_SHIFT(sf))
369 #define SF_DEF(s,f,c)           ((NV ## s ## f ## c)<<SF_SHIFT(NV ## s ## f))
370 #define SF_IDX_DEF(s,f,c,i)     ((NV ## s ## f ## c)<<SF_SHIFT(NV ## s ## f(i)))
371 #define SF_NUM(s,f,n)           (((n)&SF_MASK(NV ## s ## f))<<SF_SHIFT(NV ## s ## f))
372 #define SF_IDX_NUM(s,f,n,i)     (((n)&SF_MASK(NV ## s ## f(i)))<<SF_SHIFT(NV ## s ## f(i)))
373 #define SF_VAL(s,f,v)           (((v)>>SF_SHIFT(NV ## s ## f))&SF_MASK(NV ## s ## f))
374 #define SF_WIDTH(sf)            ((1?sf) - (0?sf) + 1)
375 // This macro parses multi-word/array defines
376 #define SF_ARR32_VAL(s,f,arr) \
377     (((arr)[SF_INDEX(NV ## s ## f)] >> SF_SHIFT(NV ## s ## f)) & SF_MASK(NV ## s ## f))
378 #define FLD_SF_DEF(s,f,d,l)     ((l)&~(SF_MASK(NV##s##f) << SF_SHIFT(NV##s##f)))| SF_DEF(s,f,d)
379 #define FLD_SF_NUM(s,f,n,l)     ((l)&~(SF_MASK(NV##s##f) << SF_SHIFT(NV##s##f)))| SF_NUM(s,f,n)
380 #define FLD_SF_IDX_DEF(s,f,c,i,l) (((l) & ~SF_SHIFTMASK(NV ## s ## f(i))) | SF_IDX_DEF(s,f,c,i))
381 #define FLD_SF_IDX_NUM(s,f,n,i,l) (((l) & ~SF_SHIFTMASK(NV ## s ## f(i))) | SF_IDX_NUM(s,f,n,i))
382 
383 
// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_GPU_ACCESS_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
392 
//
// IoAperture: a RegisterAperture bound to a window (baseAddress/length)
// within a DEVICE_MAPPING.  Register I/O is dispatched through the
// __ioaprt*__ function pointers below.
//
struct IoAperture {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Object __nvoc_base_Object;
    struct RegisterAperture __nvoc_base_RegisterAperture;
    struct Object *__nvoc_pbase_Object;
    struct RegisterAperture *__nvoc_pbase_RegisterAperture;
    struct IoAperture *__nvoc_pbase_IoAperture;
    NvU8 (*__ioaprtReadReg08__)(struct IoAperture *, NvU32);
    NvU16 (*__ioaprtReadReg16__)(struct IoAperture *, NvU32);
    NvU32 (*__ioaprtReadReg32__)(struct IoAperture *, NvU32);
    void (*__ioaprtWriteReg08__)(struct IoAperture *, NvU32, NvV8);
    void (*__ioaprtWriteReg16__)(struct IoAperture *, NvU32, NvV16);
    void (*__ioaprtWriteReg32__)(struct IoAperture *, NvU32, NvV32);
    void (*__ioaprtWriteReg32Uc__)(struct IoAperture *, NvU32, NvV32);
    NvBool (*__ioaprtIsRegValid__)(struct IoAperture *, NvU32);
    OBJGPU *pGpu;              // GPU this aperture belongs to
    NvU32 deviceIndex;         // device index (see DEVICE_INDEX usage in reg* accessors)
    NvU32 deviceInstance;      // device instance
    DEVICE_MAPPING *pMapping;  // underlying CPU mapping of the register space
    NvU32 mappingStartAddr;    // NOTE(review): presumably the register address where pMapping begins — confirm
    NvU32 baseAddress;         // added to caller offsets (see ioaprtGetRegAddr)
    NvU32 length;              // aperture length (see ioaprtGetLength)
};
416 
// NVOC boilerplate for IoAperture: typedef, class id, RTTI/cast helpers,
// and object-creation entry points.
#ifndef __NVOC_CLASS_IoAperture_TYPEDEF__
#define __NVOC_CLASS_IoAperture_TYPEDEF__
typedef struct IoAperture IoAperture;
#endif /* __NVOC_CLASS_IoAperture_TYPEDEF__ */

#ifndef __nvoc_class_id_IoAperture
#define __nvoc_class_id_IoAperture 0x40549c
#endif /* __nvoc_class_id_IoAperture */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_IoAperture;

#define __staticCast_IoAperture(pThis) \
    ((pThis)->__nvoc_pbase_IoAperture)

#ifdef __nvoc_gpu_access_h_disabled
#define __dynamicCast_IoAperture(pThis) ((IoAperture*)NULL)
#else //__nvoc_gpu_access_h_disabled
#define __dynamicCast_IoAperture(pThis) \
    ((IoAperture*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(IoAperture)))
#endif //__nvoc_gpu_access_h_disabled


NV_STATUS __nvoc_objCreateDynamic_IoAperture(IoAperture**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_IoAperture(IoAperture**, Dynamic*, NvU32, struct IoAperture * arg_pParentAperture, OBJGPU * arg_pGpu, NvU32 arg_deviceIndex, NvU32 arg_deviceInstance, DEVICE_MAPPING * arg_pMapping, NvU32 arg_mappingStartAddr, NvU32 arg_offset, NvU32 arg_length);
#define __objCreate_IoAperture(ppNewObj, pParent, createFlags, arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length) \
    __nvoc_objCreate_IoAperture((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length)
444 
// Public ioaprt* entry points resolve to the _DISPATCH thunks defined below.
#define ioaprtReadReg08(pAperture, addr) ioaprtReadReg08_DISPATCH(pAperture, addr)
#define ioaprtReadReg16(pAperture, addr) ioaprtReadReg16_DISPATCH(pAperture, addr)
#define ioaprtReadReg32(pAperture, addr) ioaprtReadReg32_DISPATCH(pAperture, addr)
#define ioaprtWriteReg08(pAperture, addr, value) ioaprtWriteReg08_DISPATCH(pAperture, addr, value)
#define ioaprtWriteReg16(pAperture, addr, value) ioaprtWriteReg16_DISPATCH(pAperture, addr, value)
#define ioaprtWriteReg32(pAperture, addr, value) ioaprtWriteReg32_DISPATCH(pAperture, addr, value)
#define ioaprtWriteReg32Uc(pAperture, addr, value) ioaprtWriteReg32Uc_DISPATCH(pAperture, addr, value)
#define ioaprtIsRegValid(pAperture, addr) ioaprtIsRegValid_DISPATCH(pAperture, addr)
453 NvU8 ioaprtReadReg08_IMPL(struct IoAperture *pAperture, NvU32 addr);
454 
ioaprtReadReg08_DISPATCH(struct IoAperture * pAperture,NvU32 addr)455 static inline NvU8 ioaprtReadReg08_DISPATCH(struct IoAperture *pAperture, NvU32 addr) {
456     return pAperture->__ioaprtReadReg08__(pAperture, addr);
457 }
458 
459 NvU16 ioaprtReadReg16_IMPL(struct IoAperture *pAperture, NvU32 addr);
460 
ioaprtReadReg16_DISPATCH(struct IoAperture * pAperture,NvU32 addr)461 static inline NvU16 ioaprtReadReg16_DISPATCH(struct IoAperture *pAperture, NvU32 addr) {
462     return pAperture->__ioaprtReadReg16__(pAperture, addr);
463 }
464 
465 NvU32 ioaprtReadReg32_IMPL(struct IoAperture *pAperture, NvU32 addr);
466 
ioaprtReadReg32_DISPATCH(struct IoAperture * pAperture,NvU32 addr)467 static inline NvU32 ioaprtReadReg32_DISPATCH(struct IoAperture *pAperture, NvU32 addr) {
468     return pAperture->__ioaprtReadReg32__(pAperture, addr);
469 }
470 
471 void ioaprtWriteReg08_IMPL(struct IoAperture *pAperture, NvU32 addr, NvV8 value);
472 
ioaprtWriteReg08_DISPATCH(struct IoAperture * pAperture,NvU32 addr,NvV8 value)473 static inline void ioaprtWriteReg08_DISPATCH(struct IoAperture *pAperture, NvU32 addr, NvV8 value) {
474     pAperture->__ioaprtWriteReg08__(pAperture, addr, value);
475 }
476 
477 void ioaprtWriteReg16_IMPL(struct IoAperture *pAperture, NvU32 addr, NvV16 value);
478 
ioaprtWriteReg16_DISPATCH(struct IoAperture * pAperture,NvU32 addr,NvV16 value)479 static inline void ioaprtWriteReg16_DISPATCH(struct IoAperture *pAperture, NvU32 addr, NvV16 value) {
480     pAperture->__ioaprtWriteReg16__(pAperture, addr, value);
481 }
482 
483 void ioaprtWriteReg32_IMPL(struct IoAperture *pAperture, NvU32 addr, NvV32 value);
484 
ioaprtWriteReg32_DISPATCH(struct IoAperture * pAperture,NvU32 addr,NvV32 value)485 static inline void ioaprtWriteReg32_DISPATCH(struct IoAperture *pAperture, NvU32 addr, NvV32 value) {
486     pAperture->__ioaprtWriteReg32__(pAperture, addr, value);
487 }
488 
489 void ioaprtWriteReg32Uc_IMPL(struct IoAperture *pAperture, NvU32 addr, NvV32 value);
490 
ioaprtWriteReg32Uc_DISPATCH(struct IoAperture * pAperture,NvU32 addr,NvV32 value)491 static inline void ioaprtWriteReg32Uc_DISPATCH(struct IoAperture *pAperture, NvU32 addr, NvV32 value) {
492     pAperture->__ioaprtWriteReg32Uc__(pAperture, addr, value);
493 }
494 
495 NvBool ioaprtIsRegValid_IMPL(struct IoAperture *pAperture, NvU32 addr);
496 
ioaprtIsRegValid_DISPATCH(struct IoAperture * pAperture,NvU32 addr)497 static inline NvBool ioaprtIsRegValid_DISPATCH(struct IoAperture *pAperture, NvU32 addr) {
498     return pAperture->__ioaprtIsRegValid__(pAperture, addr);
499 }
500 
ioaprtGetRegAddr(struct IoAperture * pAperture,NvU32 addr)501 static inline NvU32 ioaprtGetRegAddr(struct IoAperture *pAperture, NvU32 addr) {
502     return pAperture->baseAddress + addr;
503 }
504 
ioaprtGetBaseAddr(struct IoAperture * pAperture)505 static inline NvU32 ioaprtGetBaseAddr(struct IoAperture *pAperture) {
506     return pAperture->baseAddress;
507 }
508 
ioaprtGetLength(struct IoAperture * pAperture)509 static inline NvU32 ioaprtGetLength(struct IoAperture *pAperture) {
510     return pAperture->length;
511 }
512 
/*! NVOC constructor for IoAperture; invoked via __nvoc_ioaprtConstruct. */
NV_STATUS ioaprtConstruct_IMPL(struct IoAperture *arg_pAperture, struct IoAperture *arg_pParentAperture, OBJGPU *arg_pGpu, NvU32 arg_deviceIndex, NvU32 arg_deviceInstance, DEVICE_MAPPING *arg_pMapping, NvU32 arg_mappingStartAddr, NvU32 arg_offset, NvU32 arg_length);

#define __nvoc_ioaprtConstruct(arg_pAperture, arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length) ioaprtConstruct_IMPL(arg_pAperture, arg_pParentAperture, arg_pGpu, arg_deviceIndex, arg_deviceInstance, arg_pMapping, arg_mappingStartAddr, arg_offset, arg_length)
#undef PRIVATE_FIELD


// In-place construct wrapper
NV_STATUS ioaprtInit(struct IoAperture *pAperture, struct IoAperture *pParentAperture, NvU32 offset, NvU32 length);


// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
// source file references the field.
#ifdef NVOC_GPU_ACCESS_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
531 
//
// SwBcAperture: a RegisterAperture holding an array of IoApertures
// (pApertures/numApertures).  NOTE(review): the name suggests software
// broadcast — accesses presumably fan out across the array; confirm in the
// _IMPL definitions.
//
struct SwBcAperture {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Object __nvoc_base_Object;
    struct RegisterAperture __nvoc_base_RegisterAperture;
    struct Object *__nvoc_pbase_Object;
    struct RegisterAperture *__nvoc_pbase_RegisterAperture;
    struct SwBcAperture *__nvoc_pbase_SwBcAperture;
    NvU8 (*__swbcaprtReadReg08__)(struct SwBcAperture *, NvU32);
    NvU16 (*__swbcaprtReadReg16__)(struct SwBcAperture *, NvU32);
    NvU32 (*__swbcaprtReadReg32__)(struct SwBcAperture *, NvU32);
    void (*__swbcaprtWriteReg08__)(struct SwBcAperture *, NvU32, NvV8);
    void (*__swbcaprtWriteReg16__)(struct SwBcAperture *, NvU32, NvV16);
    void (*__swbcaprtWriteReg32__)(struct SwBcAperture *, NvU32, NvV32);
    void (*__swbcaprtWriteReg32Uc__)(struct SwBcAperture *, NvU32, NvV32);
    NvBool (*__swbcaprtIsRegValid__)(struct SwBcAperture *, NvU32);
    struct IoAperture *pApertures;   // array of member apertures
    NvU32 numApertures;              // number of entries in pApertures
};
550 
// NVOC boilerplate for SwBcAperture: typedef, class id, RTTI/cast helpers,
// and object-creation entry points.
#ifndef __NVOC_CLASS_SwBcAperture_TYPEDEF__
#define __NVOC_CLASS_SwBcAperture_TYPEDEF__
typedef struct SwBcAperture SwBcAperture;
#endif /* __NVOC_CLASS_SwBcAperture_TYPEDEF__ */

#ifndef __nvoc_class_id_SwBcAperture
#define __nvoc_class_id_SwBcAperture 0x6d0f88
#endif /* __nvoc_class_id_SwBcAperture */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_SwBcAperture;

#define __staticCast_SwBcAperture(pThis) \
    ((pThis)->__nvoc_pbase_SwBcAperture)

#ifdef __nvoc_gpu_access_h_disabled
#define __dynamicCast_SwBcAperture(pThis) ((SwBcAperture*)NULL)
#else //__nvoc_gpu_access_h_disabled
#define __dynamicCast_SwBcAperture(pThis) \
    ((SwBcAperture*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(SwBcAperture)))
#endif //__nvoc_gpu_access_h_disabled


NV_STATUS __nvoc_objCreateDynamic_SwBcAperture(SwBcAperture**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_SwBcAperture(SwBcAperture**, Dynamic*, NvU32, struct IoAperture * arg_pApertures, NvU32 arg_numApertures);
#define __objCreate_SwBcAperture(ppNewObj, pParent, createFlags, arg_pApertures, arg_numApertures) \
    __nvoc_objCreate_SwBcAperture((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pApertures, arg_numApertures)
578 
// Public call names: each routes through the corresponding *_DISPATCH thunk,
// which invokes the vtable slot on the object.
#define swbcaprtReadReg08(pAperture, addr) swbcaprtReadReg08_DISPATCH(pAperture, addr)
#define swbcaprtReadReg16(pAperture, addr) swbcaprtReadReg16_DISPATCH(pAperture, addr)
#define swbcaprtReadReg32(pAperture, addr) swbcaprtReadReg32_DISPATCH(pAperture, addr)
#define swbcaprtWriteReg08(pAperture, addr, value) swbcaprtWriteReg08_DISPATCH(pAperture, addr, value)
#define swbcaprtWriteReg16(pAperture, addr, value) swbcaprtWriteReg16_DISPATCH(pAperture, addr, value)
#define swbcaprtWriteReg32(pAperture, addr, value) swbcaprtWriteReg32_DISPATCH(pAperture, addr, value)
#define swbcaprtWriteReg32Uc(pAperture, addr, value) swbcaprtWriteReg32Uc_DISPATCH(pAperture, addr, value)
#define swbcaprtIsRegValid(pAperture, addr) swbcaprtIsRegValid_DISPATCH(pAperture, addr)
587 NvU8 swbcaprtReadReg08_IMPL(struct SwBcAperture *pAperture, NvU32 addr);
588 
swbcaprtReadReg08_DISPATCH(struct SwBcAperture * pAperture,NvU32 addr)589 static inline NvU8 swbcaprtReadReg08_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr) {
590     return pAperture->__swbcaprtReadReg08__(pAperture, addr);
591 }
592 
593 NvU16 swbcaprtReadReg16_IMPL(struct SwBcAperture *pAperture, NvU32 addr);
594 
swbcaprtReadReg16_DISPATCH(struct SwBcAperture * pAperture,NvU32 addr)595 static inline NvU16 swbcaprtReadReg16_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr) {
596     return pAperture->__swbcaprtReadReg16__(pAperture, addr);
597 }
598 
599 NvU32 swbcaprtReadReg32_IMPL(struct SwBcAperture *pAperture, NvU32 addr);
600 
swbcaprtReadReg32_DISPATCH(struct SwBcAperture * pAperture,NvU32 addr)601 static inline NvU32 swbcaprtReadReg32_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr) {
602     return pAperture->__swbcaprtReadReg32__(pAperture, addr);
603 }
604 
605 void swbcaprtWriteReg08_IMPL(struct SwBcAperture *pAperture, NvU32 addr, NvV8 value);
606 
swbcaprtWriteReg08_DISPATCH(struct SwBcAperture * pAperture,NvU32 addr,NvV8 value)607 static inline void swbcaprtWriteReg08_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr, NvV8 value) {
608     pAperture->__swbcaprtWriteReg08__(pAperture, addr, value);
609 }
610 
611 void swbcaprtWriteReg16_IMPL(struct SwBcAperture *pAperture, NvU32 addr, NvV16 value);
612 
swbcaprtWriteReg16_DISPATCH(struct SwBcAperture * pAperture,NvU32 addr,NvV16 value)613 static inline void swbcaprtWriteReg16_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr, NvV16 value) {
614     pAperture->__swbcaprtWriteReg16__(pAperture, addr, value);
615 }
616 
617 void swbcaprtWriteReg32_IMPL(struct SwBcAperture *pAperture, NvU32 addr, NvV32 value);
618 
swbcaprtWriteReg32_DISPATCH(struct SwBcAperture * pAperture,NvU32 addr,NvV32 value)619 static inline void swbcaprtWriteReg32_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr, NvV32 value) {
620     pAperture->__swbcaprtWriteReg32__(pAperture, addr, value);
621 }
622 
623 void swbcaprtWriteReg32Uc_IMPL(struct SwBcAperture *pAperture, NvU32 addr, NvV32 value);
624 
swbcaprtWriteReg32Uc_DISPATCH(struct SwBcAperture * pAperture,NvU32 addr,NvV32 value)625 static inline void swbcaprtWriteReg32Uc_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr, NvV32 value) {
626     pAperture->__swbcaprtWriteReg32Uc__(pAperture, addr, value);
627 }
628 
629 NvBool swbcaprtIsRegValid_IMPL(struct SwBcAperture *pAperture, NvU32 addr);
630 
swbcaprtIsRegValid_DISPATCH(struct SwBcAperture * pAperture,NvU32 addr)631 static inline NvBool swbcaprtIsRegValid_DISPATCH(struct SwBcAperture *pAperture, NvU32 addr) {
632     return pAperture->__swbcaprtIsRegValid__(pAperture, addr);
633 }
634 
// SwBcAperture constructor (implemented in the matching .c file): records the
// IoAperture array and its element count on the object.
NV_STATUS swbcaprtConstruct_IMPL(struct SwBcAperture *arg_pAperture, struct IoAperture *arg_pApertures, NvU32 arg_numApertures);

// NVOC-internal alias used by generated object-creation code.
#define __nvoc_swbcaprtConstruct(arg_pAperture, arg_pApertures, arg_numApertures) swbcaprtConstruct_IMPL(arg_pAperture, arg_pApertures, arg_numApertures)
#undef PRIVATE_FIELD
638 #undef PRIVATE_FIELD
639 
640 
641 #endif // _GPU_ACCESS_H_
642 
643 #ifdef __cplusplus
644 } // extern "C"
645 #endif
646 
647 #endif // _G_GPU_ACCESS_NVOC_H_
648