1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  * SPDX-License-Identifier: MIT
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #ifndef _NV_MMU_WALK_H_
25 #define _NV_MMU_WALK_H_
26 
27 #ifdef __cplusplus
28 extern "C" {
29 #endif
30 
31 /*!
32  * @file mmu_walk.h
33  *
34  * @brief Defines high-level utilities to manage, update, and query general MMU page tables.
35  *
36  * The MMU walk library provides a set of routines to manage and modify
37  * the page directories and tables of an MMU page level hierarchy.
38  *
39  * An instance of an MMU level hierarchy may be associated with a
40  * virtual address space (VAS) at a higher level, but this association
41  * is not handled by the library.
42  *
43  * @par State:
44  * The library requires SW state for each hierarchy being managed.
45  * Library users refer to this state through an opaque
46  * @ref MMU_WALK pointer, initialized by @ref mmuWalkCreate
47  * and destroyed by @ref mmuWalkDestroy.
48  *
49  * @par Locking:
50  * The library requires exclusive access to its @ref MMU_WALK
51  * state during each operation.
52  * It does NOT support concurrent operations on the same
53  * @ref MMU_WALK state, but each state is independent.
 * Callers must also ensure that any resources accessed by their
 * callbacks are synchronized appropriately.
56  *
57  * @par Synchrony:
 * While concurrent updates on the same hierarchy are not supported,
59  * the walker does support asynchronous/buffered updates.
60  * If the user callbacks support this strategy, it is possible to
61  * pipeline update operations with VAS access.
62  *
63  * @par Flushing:
64  * All access to page level memory and HW is abstracted by
65  * user callbacks. Therefore the walker does not enforce
66  * or schedule flushes or invalidation of caches/TLBs
67  * required for each operation. It is up to user callbacks to
68  * handle these appropriately.
69  *
70  * The basic operations of the MMU walker are mapping and unmapping
71  * ranges of VA, provided by @ref mmuWalkMap and @ref mmuWalkUnmap.
72  *
73  * These two operations have few restrictions on the allowed state transitions.
74  * The goal is to provide flexible primitives that enable the user
75  * to enforce specific (safer) policies above the walker.
76  *
77  * For example, it is possible to unmap a range that only partially
78  * overlaps existing mapped ranges. Similarly, existing mappings may be
79  * clobbered by new ones or moved from one page size to another.
80  *
81  * However, the walker does ensure that state transitions are complete -
82  * stale mappings of different page size are cleared and
83  * intermediate states are minimized (though not fully atomic).
84  *
85  * The remaining APIs are for special features and/or tuning.
86  *
 * Sparse textures (a.k.a. tiled resources) are supported through
88  * @ref mmuWalkSparsify and @ref mmuWalkUnsparsify.
89  *
90  * Pre-reservation (lock-down) of page level memory for external use
91  * and/or to force non-lazy page level allocation is supported through
92  * @ref mmuWalkReserveEntries and @ref mmuWalkReleaseEntries.
93  *
94  * External migration of page level memory is supported through
95  * @ref mmuWalkMigrateLevelInstance. This is needed only to migrate
96  * page level instances. The target physical memory mapped by the levels
97  * can be migrated with @ref mmuWalkMap (user must handle the copy part).
98  */
99 
100 /* ------------------------ Includes --------------------------------------- */
101 #include "nvtypes.h"
102 #include "nvstatus.h"
103 #include "mmu_fmt.h"
104 
105 /* ------------------------ Version --------------------------------------- */
106 /*!
 * MMU Walk API version number.
 * Version 2 added the bIgnoreChannelBusy parameter to MmuWalkCBUpdatePdb
 * and added mmuWalkMigrateLevelInstance.
 * @note Whenever any part of this API changes, increment this version number.
 * This is required to maintain compatibility with external clients.
112  */
113 #define MMU_WALK_API_VERSION 2
114 
115 /* --------------------------- Datatypes ------------------------------------ */
116 
117 /*!
118  * Opaque library-defined state for a single page level hierarchy backing a VAS.
119  */
120 typedef struct MMU_WALK             MMU_WALK;
121 
122 /*!
123  * Opaque user-defined state describing a block of physical memory.
124  * The library references these as the backing memory for page level instances.
125  */
126 typedef struct MMU_WALK_MEMDESC     MMU_WALK_MEMDESC;
127 
128 /*!
129  * Opaque user-defined state passed to all user callbacks.
130  */
131 typedef struct MMU_WALK_USER_CTX    MMU_WALK_USER_CTX;
132 
133 /*!
134  * State that a range of MMU page level entries can be filled to.
135  * @see MmuWalkCBFillEntries.
136  */
137 typedef enum
138 {
139     /*!
140      * The range is not valid and will generate a fault on access.
141      */
142     MMU_WALK_FILL_INVALID,
143 
144     /*!
145      * Also known as the "zero" state.
146      * Writes are dropped and reads return zero when the range is accessed.
147      *
148      * @note Not all MMUs support this state/feature.
149      */
150     MMU_WALK_FILL_SPARSE,
151 
152     /**
153      * No valid aligned 4K PTE state for a 64K PTE
154      * 64K big PTE state indicating that there is no valid 4K aligned PTEs
155      *
156      * @note This is not supported pre Volta.
157      */
158     MMU_WALK_FILL_NV4K,
159 } MMU_WALK_FILL_STATE;
160 
161 /*!
162  * See @ref mmuWalkMap.
163  */
164 typedef struct MMU_MAP_TARGET       MMU_MAP_TARGET;
165 
166 /*!
167  * See @ref mmuWalkMap.
168  */
169 typedef struct MMU_MAP_ITERATOR     MMU_MAP_ITERATOR;
170 
171 /*!
172  * User callback to allocate backing physical memory for a page level instance.
173  *
174  * The contents of the memory need not be initialized.
175  * The walker will initialize entries before use.
176  *
177  * The walker calls this lazily when a page level instance is
178  * required for the operation taking place.
 * It is up to the user implementation whether to allocate new memory,
180  * pre-allocate, pool, etc.
181  *
 * It is also up to the user to determine the best physical aperture and
 * attributes for the memory (e.g. for a GPU, whether to place it in
 * vidmem or sysmem).
 * The walker only modifies the memory contents through the remaining
 * callbacks below, so access is entirely opaque.
186  *
187  * This interface has several parameters that are required for
188  * specialized tuning of particular MMU formats, but for a simple
189  * user implementation most can be ignored.
190  *
191  * @param[in]  vaBase       First absolute VA covered by this level instance.
192  *                            This (+pLevelFmt) uniquely identifies the instance.
193  * @param[in]  vaLimit      Last absolute VA covered by this level instance
194  *                            required for the current operation.
195  *                            This may be used to implement dynamic growth
196  *                            for levels that support it (e.g. root page dir).
197  * @param[in]  bTarget      Indicates if this level instance is the target
198  *                            for the current operation.
199  *                            If bTarget is false it is usually not required
200  *                            to allocate memory, but it is required to maintain
201  *                            parallel partial size sub-levels.
202  * @param[in,out] ppMemDesc On input, the existing memory descriptor for this instance
203  *                            (may be NULL).
204  *                            This must NOT be modified or freed during this
205  *                            callback. The walker will call @ref MmuWalkCBLevelFree
206  *                            when this memory is no longer required.
207  *                          On output, new memory descriptor for this instance.
208  *                            Leaving the old memory descriptor is allowed if it
209  *                            already provides sufficient backing memory for the given VA range.
210  *                            If bTarget is true, this MUST be non-NULL on success.
211  * @param[in,out] pMemSize  Old/new memory size in bytes.
212  *                            Can be used for dynamic root page directory growth
213  *                            or partial-size page tables.
214  * @param[in,out] pBChanged Indicates if the backing memory behind *ppMemDesc has
215  *                            changed (initially NV_FALSE).
216  *                            This must be set if either the *ppMemDesc pointer or
217  *                            *pMemSize change, but also allows for changes in
218  *                            physical aperture or location to be updated properly.
219  *
 * @returns On failure the current walk operation will be aborted
 *          and the SW state rolled back.
 *
 * @note The walker will only call this function
 *       prior to page level and HW state modifications.
224  */
225 typedef NV_STATUS
226 MmuWalkCBLevelAlloc
227 (
228     MMU_WALK_USER_CTX       *pUserCtx,
229     const MMU_FMT_LEVEL     *pLevelFmt,
230     const NvU64              vaBase,
231     const NvU64              vaLimit,
232     const NvBool             bTarget,
233     MMU_WALK_MEMDESC       **ppMemDesc,
234     NvU32                   *pMemSize,
235     NvBool                  *pBChanged
236 );
237 
238 /*!
239  * User callback to free backing physical memory of an unused page level instance.
240  *
241  * The contents of the memory when freed are undefined.
242  * It is up to the user to zero the memory if required.
243  *
244  * The walker calls this aggressively when a page level instance is no longer
245  * required (on a commit or discard operation).
 * It is up to the user implementation whether to free immediately,
247  * cache for later re-use, etc.
248  *
249  * @param[in]  vaBase    First absolute VA covered by this level instance.
250  *                         This (+pLevelFmt) uniquely identifies the instance.
251  * @param[in]  pOldMem   Memory descriptor to free.
252  */
253 typedef void
254 MmuWalkCBLevelFree
255 (
256     MMU_WALK_USER_CTX   *pUserCtx,
257     const MMU_FMT_LEVEL *pLevelFmt,
258     const NvU64          vaBase,
259     MMU_WALK_MEMDESC    *pOldMem
260 );
261 
262 /*!
263  * User callback to initialize the HW root page directory pointer (PDB).
264  * In this context "PDB" stands for "page directory base (address)."
265  *
266  * Example: For GPU MMU this should update the instance blocks
267  *          associated with the VAS.
268  *
269  * @returns NV_TRUE if the operation completed.
270  * @returns NV_FALSE if the operation must be retried later. See @ref mmuWalkContinue.
271  */
272 typedef NvBool
273 MmuWalkCBUpdatePdb
274 (
275     MMU_WALK_USER_CTX       *pUserCtx,
276     const MMU_FMT_LEVEL     *pRootFmt,
277     const MMU_WALK_MEMDESC  *pRootMem,
278     const NvBool             bIgnoreChannelBusy
279 );
280 
281 /*!
282  * User callback to initialize a page directory entry to point to one or more
283  * sub-levels.
284  *
285  * @param[in]  pLevelFmt  Format of the parent level.
286  * @param[in]  pLevelMem  Memory descriptor of the parent level.
287  * @param[in]  entryIndex Index of the PDE being initialized.
288  * @param[in]  pSubLevels Array of sub-level memory descriptors of length
289  *                        pLevelFmt->numSubLevels.
290  *
291  * @returns NV_TRUE if the operation completed.
292  * @returns NV_FALSE if the operation must be retried later. See @ref mmuWalkContinue.
293  */
294 typedef NvBool
295 MmuWalkCBUpdatePde
296 (
297     MMU_WALK_USER_CTX       *pUserCtx,
298     const MMU_FMT_LEVEL     *pLevelFmt,
299     const MMU_WALK_MEMDESC  *pLevelMem,
300     const NvU32              entryIndex,
301     const MMU_WALK_MEMDESC **pSubLevels
302 );
303 
304 /*!
305  * User callback to fill a range of entries with a constant state.
306  *
307  * @param[in]     pLevelFmt    Format of the level.
308  * @param[in]     pLevelMem    Memory descriptor of the level.
309  * @param[in]     entryIndexLo First entry index to initialize.
310  * @param[in]     entryIndexHi Last entry index to initialize.
311  * @param[in]     fillState    Constant state to initialize to.
312  * @param[in,out] pProgress    Number of entries filled successfully (0 on input).
313  *
314  * @returns If (*pProgress == entryIndexHi - entryIndexLo + 1) then
315  *          the operation completed successfully.
316  * @returns Otherwise the operation must be retried later for
317  *          the remaining entries. See @ref mmuWalkContinue.
318  */
319 typedef void
320 MmuWalkCBFillEntries
321 (
322     MMU_WALK_USER_CTX         *pUserCtx,
323     const MMU_FMT_LEVEL       *pLevelFmt,
324     const MMU_WALK_MEMDESC    *pLevelMem,
325     const NvU32                entryIndexLo,
326     const NvU32                entryIndexHi,
327     const MMU_WALK_FILL_STATE  fillState,
328     NvU32                     *pProgress
329 );
330 
331 /*!
332  * User callback to copy a range of entries between backing page level memory.
333  *
334  * @note This interface is only required if dynamic page level growth is
335  *       supported (e.g. for the root page directory or partial page tables)
336  *       or for page level migration (@ref mmuWalkMigrateLevelInstance).
337  *
338  * @param[in]     pLevelFmt    Format of the level.
339  * @param[in]     pMemSrc      Source memory descriptor of the level.
340  * @param[in]     pMemDst      Destination memory descriptor of the level.
341  * @param[in]     entryIndexLo First entry index to copy.
342  * @param[in]     entryIndexHi Last entry index to copy.
343  * @param[in,out] pProgress    Number of entries copied successfully (0 on input).
344  *
345  * @returns If (*pProgress == entryIndexHi - entryIndexLo + 1) then
346  *          the operation completed successfully.
347  * @returns Otherwise the operation must be retried later for
348  *          the remaining entries. See @ref mmuWalkContinue.
349  */
350 typedef void
351 MmuWalkCBCopyEntries
352 (
353     MMU_WALK_USER_CTX         *pUserCtx,
354     const MMU_FMT_LEVEL       *pLevelFmt,
355     const MMU_WALK_MEMDESC    *pMemSrc,
356     const MMU_WALK_MEMDESC    *pMemDst,
357     const NvU32                entryIndexLo,
358     const NvU32                entryIndexHi,
359     NvU32                     *pProgress
360 );
361 
362 /*!
 * User callback to copy the staging buffer to its final destination.
 *
 * If NULL is passed as the staging buffer on walker creation,
 * this callback is ignored. Otherwise, this callback should
 * perform a memory copy from the table located at the staging buffer
 * memdesc to its final location in the buffer allocated for the
 * actual table (in FB or otherwise).
370  *
371  * @param[in] pStagingBuffer Staging buffer PD/PTs are written to
372  * @param[in] pLevelBuffer   Memdesc containing final location for
373  *                           PD/PT
374  * @param[in] entryIndexLo   Start index of entries to be copied.
375  * @param[in] entryIndexHi   End index (inclusive) of entries to be
376  *                           copied.
377  * @param[in] tableSize      Size of the current level of PD/PT, in
378  *                           entries. The offsets into the staging
379  *                           buffer are the entry indices taken
380  *                           modulo tableSize.
381  * @param[in] entrySize      Size of each entry, in bytes
382  */
383 typedef void
384 MmuWalkCBWriteBuffer
385 (
386     MMU_WALK_USER_CTX    *pUserCtx,
387     MMU_WALK_MEMDESC     *pStagingBuffer,
388     MMU_WALK_MEMDESC     *pLevelBuffer,
389     NvU64                 entryIndexLo,
390     NvU64                 entryIndexHi,
391     NvU64                 tableSize,
392     NvU64                 entrySize
393 );
394 
395 /*!
396  * Bundles user-implemented callback pointers.
397  */
398 typedef struct
399 {
400     MmuWalkCBLevelAlloc  *LevelAlloc;
401     MmuWalkCBLevelFree   *LevelFree;
402     MmuWalkCBUpdatePdb   *UpdatePdb;
403     MmuWalkCBUpdatePde   *UpdatePde;
404     MmuWalkCBFillEntries *FillEntries;
405     MmuWalkCBCopyEntries *CopyEntries;
406     MmuWalkCBWriteBuffer *WriteBuffer;
407 } MMU_WALK_CALLBACKS;
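
/*
 * Example: bundling hypothetical user implementations (the my* functions
 * sketched above, plus analogous LevelFree/UpdatePdb/CopyEntries).
 * WriteBuffer is left NULL here on the assumption that no staging buffer
 * is passed to mmuWalkCreate.
 *
 *     static const MMU_WALK_CALLBACKS myCallbacks =
 *     {
 *         myLevelAlloc,
 *         myLevelFree,
 *         myUpdatePdb,
 *         myUpdatePde,
 *         myFillEntries,
 *         myCopyEntries,
 *         NULL, // WriteBuffer: unused without a staging buffer
 *     };
 */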
408 
409 
410 
411 /*!
412  * Flags that affect walk library behavior.
413  */
414 typedef struct
415 {
416     /*!
417      * Indicates if the user implementation supports asynchronous/buffered
418      * updates, such that all callbacks that modify page level and/or HW state
419      * are buffered (e.g. to be committed by a later DMA/copy).
420      *
421      * The primary advantage of asynchronous mode is the potential to pipeline
422      * updates with other work.
423      *
     * The main drawback of asynchronous mode is that the amount of space
     * required for the buffers is generally not known ahead of time
     * (bounded, but potentially large).
426      * The walker library supports splitting a given operation into multiple
427      * pieces, each piece continuing where it left off until an operation is complete.
428      * This way the user can use a fixed or limited size buffer.
429      *
430      * Synchronous update mode (default) *requires* the callbacks to modify page level
431      * and HW state immediately. This is usually simpler to implement but
432      * less efficient.
433      */
434     NvBool bAsynchronous : 1;
435     /**
436      * @brief      Indicates if ATS is enabled.
437      * @details    Should be setup as:
438      *             gvaspaceIsAtsEnabled(pWalk->pUserCtx->pGVAS)
439      *             Currently, from 8/2016, it is used to enable NV4K (no valid
440      *             4K PTE) in MMU walker
441      */
442     NvBool bAtsEnabled : 1;
443     /**
444      * @brief      Indicates if the iterative implementation should be used
445      * @details    In certain situations like running the MMU Tracer or running
446      *             on platforms like PPC, the recursive implementation of the
447      *             MMU Walker consumes too much stack space. Enabling this option
448      *             changes the MMU Walker to use iteration instead of recursion to
449      *             reduce stack usage.
450      */
451     NvBool bUseIterative : 1;
452 } MMU_WALK_FLAGS;
453 
454 /*!
455  * User callback to map a batch of entries during an @ref mmuWalkMap operation.
456  *
457  * A "batch" is a contiguous range of entries within a single page level instance.
458  * It is the responsibility of the callback to track the current
459  * page index into the target physical memory (if applicable).
460  *
461  * @note The reason this interface enforces batching is to amortize the cost
462  *       of the function pointer (callback) flexibility.
 *       Some architectures (e.g. ARM) have performance issues with indirect
 *       function calls, and the PTE init loop is on the critical path.
465  *
466  * @param[in]     entryIndexLo First entry index to map.
467  * @param[in]     entryIndexHi Last entry index to map.
468  * @param[in,out] pProgress    Number of entries mapped successfully (0 on input).
469  *
470  * @returns If (*pProgress == entryIndexHi - entryIndexLo + 1) then
471  *          the operation completed successfully.
472  * @returns Otherwise the operation must be retried later for
473  *          the remaining entries. See @ref mmuWalkContinue.
474  */
475 typedef void
476 MmuWalkCBMapNextEntries
477 (
478     MMU_WALK_USER_CTX        *pUserCtx,
479     const MMU_MAP_TARGET     *pTarget,
480     const MMU_WALK_MEMDESC   *pLevelMem,
481     const NvU32               entryIndexLo,
482     const NvU32               entryIndexHi,
483     NvU32                    *pProgress
484 );
485 
486 /*!
487  * Describes the physical memory (target) of an @ref mmuWalkMap operation.
488  */
489 struct MMU_MAP_TARGET
490 {
491     /*!
492      * Target level format.
493      */
494     const MMU_FMT_LEVEL     *pLevelFmt;
495 
496     /*!
497      * User-defined iterator for the physical pages being mapped.
498      * This may be context sensitive - e.g. it can contain a counter to track
499      * the current page index. The walker will always call this for consecutive
500      * increasing page indices across a single map operation.
501      *
502      * @note The lifetime of this pointer extends until the operation
503      *       completes. Take care that it is not stack allocated if
504      *       using @ref mmuWalkContinue from a different call stack later.
505      */
506     MMU_MAP_ITERATOR        *pIter;
507 
508     /*!
509      * Callback to map the batch of entries.
510      */
511     MmuWalkCBMapNextEntries *MapNextEntries;
512 
513     /*!
514      * Page array granularity of the physical target memory
515      */
516     NvU32 pageArrayGranularity;
517 };
518 
519 /*----------------------------Public Interface--------------------------------*/
520 
521 /*!
522  * Create an initial walker library SW state.
523  *
524  * @param[in]  pRootFmt       MMU format of the root page level.
525  * @param[in]  pUserCtx       User-defined context passed to callbacks.
526  * @param[in]  pCb            User-implemented callback bundle.
527  * @param[in]  flags          Flags applying to this walker instance.
528  * @param[out] ppWalk         Returned walker state.
529  * @param[in]  pStagingBuffer Optional memdesc to stage PD/PT writes to.
530  */
531 NV_STATUS
532 mmuWalkCreate
533 (
534     const MMU_FMT_LEVEL      *pRootFmt,
535     MMU_WALK_USER_CTX        *pUserCtx,
536     const MMU_WALK_CALLBACKS *pCb,
537     const MMU_WALK_FLAGS      flags,
538     MMU_WALK                **ppWalk,
539     MMU_WALK_MEMDESC         *pStagingBuffer
540 );
541 
542 /*!
543  * Destroy a walker library SW state.
544  *
545  * This will free all remaining memory referenced by the walker, but it
546  * is recommended to enforce symmetric operations at a higher level
547  * to catch/report memory leaks.
548  */
549 void
550 mmuWalkDestroy
551 (
552     MMU_WALK *pWalk
553 );
554 
555 /*!
556  * Map a range of VA to physical memory at an arbitrary page level.
557  *
558  * The VA range must be aligned to the MMU's smallest page size and
559  * to the largest page size of any previous mapping that overlaps.
560  * The VA range cannot cross a sparse boundary.
561  *
562  * @returns See @ref mmuWalkContinue.
563  */
564 NV_STATUS
565 mmuWalkMap
566 (
567     MMU_WALK             *pWalk,
568     const NvU64           vaLo,
569     const NvU64           vaHi,
570     const MMU_MAP_TARGET *pTarget
571 );
572 
573 /*!
574  * Return a range of VA to its unmapped state (invalid or sparse).
575  *
576  * The VA range must be aligned to the MMU's smallest page size and
577  * to the largest page size of any previous mappings that overlap.
578  * The VA range cannot cross a sparse boundary.
579  *
580  * @returns See @ref mmuWalkContinue.
581  */
582 NV_STATUS
583 mmuWalkUnmap
584 (
585     MMU_WALK             *pWalk,
586     const NvU64           vaLo,
587     const NvU64           vaHi
588 );
589 
590 /*!
591  * Set the unmapped state of a VA range to the sparse (zero) state.
592  *
593  * The VA range must be aligned to the MMU's smallest page size.
594  * The previous state of the entire range must be unmapped and non-sparse.
595  * The last parameter indicates whether the staging buffer and the WriteBuffer
596  * callback should be used.
597  *
598  * @returns See @ref mmuWalkContinue.
599  */
600 NV_STATUS
601 mmuWalkSparsify
602 (
603     MMU_WALK             *pWalk,
604     const NvU64           vaLo,
605     const NvU64           vaHi,
606     const NvBool          bUseStagingBuffer
607 );
608 
609 /*!
610  * Return a range of VA to the invalid unmapped state.
611  *
612  * The VA range must exactly match a previously sparsified range.
613  * Any mappings remaining within the range are cleared to invalid.
614  *
615  * @returns See @ref mmuWalkContinue.
616  */
617 NV_STATUS
618 mmuWalkUnsparsify
619 (
620     MMU_WALK             *pWalk,
621     const NvU64           vaLo,
622     const NvU64           vaHi
623 );
624 
625 /*!
626  * Reserve (lock-down) page level entries for a VA range.
627  *
628  * The VA range must be aligned to the target page size.
629  * The range may not overlap with an existing reserved range for the
630  * target page level, but reservation state between levels is independent.
631  *
632  * @note This does not change the effective state of the VA range.
633  *       It only changes the state of the backing page level memory.
634  *
635  * @returns See @ref mmuWalkContinue.
636  */
637 NV_STATUS
638 mmuWalkReserveEntries
639 (
640     MMU_WALK             *pWalk,
641     const MMU_FMT_LEVEL  *pLevelFmt,
642     const NvU64           vaLo,
643     const NvU64           vaHi,
644     const NvBool          bInvalidate
645 );
646 
647 /*!
648  * Release page level entries previously reserved.
649  *
650  * The VA range must exactly match an existing reserved range for
651  * the target page level.
652  *
653  * @note This does not change the effective state of the VA range.
654  *       It only changes the state of the backing page level memory.
655  *
656  * @returns See @ref mmuWalkContinue.
657  */
658 NV_STATUS
659 mmuWalkReleaseEntries
660 (
661     MMU_WALK             *pWalk,
662     const MMU_FMT_LEVEL  *pLevelFmt,
663     const NvU64           vaLo,
664     const NvU64           vaHi
665 );
666 
667 /*!
668  * Commit the page directory entries for a VA range.
669  *
 * Traverses the hierarchy and rewrites the PDEs from the SW state.
 * This will not trigger any new PDE allocations or state changes.
 *
 * This call does not affect the PTEs. If needed, support can be added later.
674  *
675  * The VA range must be aligned to the MMU's smallest page size and
676  * to the largest page size of any previous mappings that overlap.
677  * The VA range cannot cross a sparse boundary.
678  *
679  * @returns See @ref mmuWalkContinue.
680  */
681 NV_STATUS
682 mmuWalkCommitPDEs
683 (
684     MMU_WALK             *pWalk,
685     const MMU_FMT_LEVEL  *pLevelTarget,
686     const NvU64           vaLo,
687     const NvU64           vaHi
688 );
689 
690 /*!
691  * Switches a page level instance from one memory location to another.
692  *
693  * This function commits the PDB to the HW if the level instance being
694  * migrated happens to be the PDB.
695  *
 * @note This differs from @ref mmuWalkMigrateLevelInstance in that copying
 * the entries and updating the parent PDE are each optional, as specified
 * by the caller.
 *
 * @returns See @ref mmuWalkContinue.
701  */
702 NV_STATUS
703 mmuWalkModifyLevelInstance
704 (
705     MMU_WALK             *pWalk,
706     const MMU_FMT_LEVEL  *pLevelFmt,
707     const NvU64           vaBase,
708     MMU_WALK_MEMDESC     *pNewMem,
709     const NvU32           memSize,
710     const NvBool          bCopyEntries,
711     const NvBool          bUpdatePde,
712     const NvBool          bIgnoreChannelBusy
713 );
714 
715 /*!
716  * Migrate a page level instance from one memory location to another.
717  *
718  * The VA must be aligned to the base of an instance that has been
719  * previously allocated by the walker through @ref MmuWalkCBLevelAlloc.
720  *
721  * @note This does not change the effective state of any VA range.
722  *       It only changes the state of the backing page level memory.
723  *       It is a wrapper around @ref mmuWalkModifyLevelInstance.
724  *
725  * @returns See @ref mmuWalkContinue.
726  */
727 NV_STATUS
728 mmuWalkMigrateLevelInstance
729 (
730     MMU_WALK             *pWalk,
731     const MMU_FMT_LEVEL  *pLevelFmt,
732     const NvU64           vaBase,
733     MMU_WALK_MEMDESC     *pNewMem,
734     const NvU32           memSize,
735     const NvBool          bIgnoreChannelBusy
736 );
737 
738 /*!
739  * Query a walker SW state for the page level instance memory descriptors
 * backing a given virtual address and page size.
 * The caller provides pointers to receive the memdesc and its size.
742  */
743 NV_STATUS
744 mmuWalkGetPageLevelInfo
745 (
746     const MMU_WALK          *pWalk,
747     const MMU_FMT_LEVEL     *pLevelFmt,
748     const NvU64              virtAddr,
749     const MMU_WALK_MEMDESC **ppMemDesc,
750     NvU32                   *pMemSize
751 );
752 
753 /*!
 * Force-frees all page level instances. This may be required in the case
 * of surprise removal: once the GPU is off the bus, many unmappings can
 * fail, which may leave some MMU_WALK_LEVEL and MMU_WALK_LEVEL_INST
 * objects in an allocated state. This function iterates over the level
 * instances at each level and force-frees everything, ignoring any
 * outstanding valid, sparse, and reserved entries.
761  */
762 void
763 mmuWalkLevelInstancesForceFree
764 (
765     MMU_WALK *pWalk
766 );
767 
768 /*!
769  * Continue a walker operation that was previously started.
770  *
771  * If a state changing operation on the walker returns
772  * NV_WARN_MORE_PROCESSING_REQUIRED, the user must call this function
773  * to continue processing once resources (e.g. pushbuffer space)
774  * are again available.
775  *
776  * Any operation-specific context passed to the walker when the operation
777  * is started continues to be referenced until the operation completes.
778  *
 * @returns NV_OK if the operation has finished. In asynchronous mode,
 *          the user must call @ref mmuWalkCommit once the last update
 *          buffer has been committed.
782  *
783  * @returns NV_WARN_MORE_PROCESSING_REQUIRED if more processing is
784  *          required. As a pathological case the walker supports a
785  *          1-entry update buffer, but that is obviously not efficient.
786  *
 * @returns Any other error code indicates the walker is either
 *          not in a state that can continue (user bug, ignored) or
 *          there is an internal bug - either in the walker or in the user
 *          callbacks. The latter case is fatal - there is no
 *          way for the walker to recover from such situations, as the
 *          SW/HW state has potentially lost consistency.
 *          Recovery would require fully transactional updates, which
 *          would increase both the intermediate memory requirements and
 *          the probability of an internal bug.
 *          The user must decide how to handle this case (either ignore
 *          and hope for the best, or reset/crash the context using
 *          this state).
799  */
800 NV_STATUS
801 mmuWalkContinue
802 (
803     MMU_WALK *pWalk
804 );
805 
806 /*!
807  * Commit any pending SW state the walker is tracking and
808  * free references to unused page level instances.
809  *
810  * The user only needs to call this if supporting
811  * @ref MMU_WALK_FLAGS::bAsynchronous mode.
812  * Otherwise this will be called automatically once an operation completes.
813  *
814  * For buffered updates, the user must call this after the entire
815  * operation has completed - once @ref mmuWalkContinue returns NV_OK and the final
816  * update buffer has been committed to memory/HW (only then is it safe
817  * to free the unused level instances).
818  */
819 void
820 mmuWalkCommit
821 (
822     MMU_WALK *pWalk
823 );
824 
825 /*!
826  * Get the user context of a walker state.
827  */
828 MMU_WALK_USER_CTX *
829 mmuWalkGetUserCtx
830 (
831     const MMU_WALK *pWalk
832 );
833 
834 /*!
835  * Set the user context of a walker state.
836  */
837 void
838 mmuWalkSetUserCtx
839 (
840     MMU_WALK          *pWalk,
841     MMU_WALK_USER_CTX *pUserCtx
842 );
843 
844 /*!
845  * Get the user callbacks of a walker state.
846  */
847 const MMU_WALK_CALLBACKS *
848 mmuWalkGetCallbacks
849 (
850     const MMU_WALK *pWalk
851 );
852 
853 /*!
854  * Set the user callbacks of a walker state.
855  */
856 void
857 mmuWalkSetCallbacks
858 (
859     MMU_WALK                 *pWalk,
860     const MMU_WALK_CALLBACKS *pCb
861 );
862 
863 #ifdef __cplusplus
864 }
865 #endif
866 
867 #endif
868