/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "os-interface.h"
#include "nv.h"
#include "nv-linux.h"

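// Mark a contiguous range of pages uncached (UC), using whichever
// set_memory/set_pages interface the kernel exposes.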
static inline void nv_set_contig_memory_uc(nvidia_pte_t *page_ptr, NvU32 num_pages)
{
#if defined(NV_SET_MEMORY_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    unsigned long addr = (unsigned long)page_address(page);
    set_memory_uc(addr, num_pages);
#elif defined(NV_SET_PAGES_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    set_pages_uc(page, num_pages);
#endif
}

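// Restore a contiguous range of pages to write-back (WB) caching.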
static inline void nv_set_contig_memory_wb(nvidia_pte_t *page_ptr, NvU32 num_pages)
{
#if defined(NV_SET_MEMORY_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    unsigned long addr = (unsigned long)page_address(page);
    set_memory_wb(addr, num_pages);
#elif defined(NV_SET_PAGES_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    set_pages_wb(page, num_pages);
#endif
}

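// Report whether the kernel's batched set_memory_array_* interface supports
// the requested memory type.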
static inline int nv_set_memory_array_type_present(NvU32 type)
{
    switch (type)
    {
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
        case NV_MEMORY_UNCACHED:
            return 1;
        case NV_MEMORY_WRITEBACK:
            return 1;
#endif
        default:
            return 0;
    }
}

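// Report whether the kernel's batched set_pages_array_* interface supports
// the requested memory type.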
static inline int nv_set_pages_array_type_present(NvU32 type)
{
    switch (type)
    {
#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
        case NV_MEMORY_UNCACHED:
            return 1;
        case NV_MEMORY_WRITEBACK:
            return 1;
#endif
        default:
            return 0;
    }
}

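// Apply the requested caching type to an array of kernel virtual addresses
// via set_memory_array_{uc,wb}().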
static inline void nv_set_memory_array_type(
    unsigned long *pages,
    NvU32 num_pages,
    NvU32 type
)
{
    switch (type)
    {
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
        case NV_MEMORY_UNCACHED:
            set_memory_array_uc(pages, num_pages);
            break;
        case NV_MEMORY_WRITEBACK:
            set_memory_array_wb(pages, num_pages);
            break;
#endif
        default:
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %s(): type %d unimplemented\n",
                __FUNCTION__, type);
            break;
    }
}

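// Apply the requested caching type to an array of struct page pointers
// via set_pages_array_{uc,wb}().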
static inline void nv_set_pages_array_type(
    struct page **pages,
    NvU32 num_pages,
    NvU32 type
)
{
    switch (type)
    {
#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
        case NV_MEMORY_UNCACHED:
            set_pages_array_uc(pages, num_pages);
            break;
        case NV_MEMORY_WRITEBACK:
            set_pages_array_wb(pages, num_pages);
            break;
#endif
        default:
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %s(): type %d unimplemented\n",
                __FUNCTION__, type);
            break;
    }
}

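// Apply the requested caching type to a physically contiguous range of pages.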
static inline void nv_set_contig_memory_type(
    nvidia_pte_t *page_ptr,
    NvU32 num_pages,
    NvU32 type
)
{
    switch (type)
    {
        case NV_MEMORY_UNCACHED:
            nv_set_contig_memory_uc(page_ptr, num_pages);
            break;
        case NV_MEMORY_WRITEBACK:
            nv_set_contig_memory_wb(page_ptr, num_pages);
            break;
        default:
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %s(): type %d unimplemented\n",
                __FUNCTION__, type);
    }
}

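//
// Apply the requested caching type to every page of an allocation, preferring
// the batched array interfaces (which also handle non-contiguous memory) and
// falling back to per-page updates when they are unavailable.
//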
static inline void nv_set_memory_type(nv_alloc_t *at, NvU32 type)
{
    NvU32 i;
    NV_STATUS status = NV_OK;
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
    unsigned long *pages = NULL;
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
    struct page **pages = NULL;
#else
    unsigned long *pages = NULL;
#endif

    nvidia_pte_t *page_ptr;
    struct page *page;

    if (nv_set_memory_array_type_present(type))
    {
        status = os_alloc_mem((void **)&pages,
                at->num_pages * sizeof(unsigned long));
    }
    else if (nv_set_pages_array_type_present(type))
    {
        status = os_alloc_mem((void **)&pages,
                at->num_pages * sizeof(struct page*));
    }

    if (status != NV_OK)
        pages = NULL;

    //
    // If the set_{memory,page}_array_* functions are in the kernel interface,
    // it's faster to use them since they work on non-contiguous memory,
    // whereas the set_{memory,page}_* functions do not.
    //
    if (pages)
    {
        for (i = 0; i < at->num_pages; i++)
        {
            page_ptr = at->page_table[i];
            page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
            pages[i] = (unsigned long)page_address(page);
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
            pages[i] = page;
#endif
        }
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
        nv_set_memory_array_type(pages, at->num_pages, type);
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
        nv_set_pages_array_type(pages, at->num_pages, type);
#endif
        os_free_mem(pages);
    }

    //
    // If the set_{memory,page}_array_* functions aren't present in the kernel
    // interface, each page has to be set individually, which has been measured
    // to be ~10x slower than using the set_{memory,page}_array_* functions.
    //
    else
    {
        for (i = 0; i < at->num_pages; i++)
            nv_set_contig_memory_type(at->page_table[i], 1, type);
    }
}

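// Return the highest system memory physical address across all online
// NUMA nodes.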
static NvU64 nv_get_max_sysmem_address(void)
{
    NvU64 global_max_pfn = 0ULL;
    int node_id;

    for_each_online_node(node_id)
    {
        global_max_pfn = max(global_max_pfn, (NvU64)node_end_pfn(node_id));
    }

    return ((global_max_pfn + 1) << PAGE_SHIFT) - 1;
}

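// Build the GFP flags for a system memory allocation: restrict the pool to
// the first 4GB (GFP_DMA32) when necessary, pick a retry policy, and request
// zeroed, node-local, or compound pages as the allocation flags dictate.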
static unsigned int nv_compute_gfp_mask(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    unsigned int gfp_mask = NV_GFP_KERNEL;
    struct device *dev = at->dev;

    /*
     * If we know that SWIOTLB is enabled (and therefore we avoid calling the
     * kernel to DMA-remap the pages), or if we are using dma_direct (which may
     * transparently use the SWIOTLB for pages that are unaddressable by the
     * device, in kernel versions 5.0 and later), limit our allocation pool
     * to the first 4GB to avoid allocating pages outside of our device's
     * addressable limit.
     * Also, limit the allocation to the first 4GB if explicitly requested by
     * setting the "nv->force_dma32_alloc" variable.
     */
    if (!nv || !nv_requires_dma_remap(nv) || nv_is_dma_direct(dev) || nv->force_dma32_alloc)
    {
        NvU64 max_sysmem_address = nv_get_max_sysmem_address();
        if ((dev && dev->dma_mask && (*(dev->dma_mask) < max_sysmem_address)) ||
            (nv && nv->force_dma32_alloc))
        {
            gfp_mask = NV_GFP_DMA32;
        }
    }
#if defined(__GFP_RETRY_MAYFAIL)
    gfp_mask |= __GFP_RETRY_MAYFAIL;
#elif defined(__GFP_NORETRY)
    gfp_mask |= __GFP_NORETRY;
#endif
#if defined(__GFP_ZERO)
    if (at->flags.zeroed)
        gfp_mask |= __GFP_ZERO;
#endif
#if defined(__GFP_THISNODE)
    if (at->flags.node)
        gfp_mask |= __GFP_THISNODE;
#endif
    // Compound pages are required by vm_insert_page for high-order page
    // allocations
    if (at->order > 0)
        gfp_mask |= __GFP_COMP;

    return gfp_mask;
}

/*
 * This function is needed to allocate contiguous physical memory in Xen
 * dom0. Because Xen dom0 uses the Xen SWIOTLB, memory allocated by
 * NV_GET_FREE_PAGES may not be machine-contiguous when the size exceeds one
 * page; nv_alloc_coherent_pages() gives us machine-contiguous memory.
 * Even though this function obtains the DMA address directly, pci_map_page()
 * is still called later to get the DMA address. This is fine, as it returns
 * the same machine address.
 */
static NV_STATUS nv_alloc_coherent_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    NvU32 i;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    dma_addr_t bus_addr;
    nv_linux_state_t *nvl;
    struct device *dev;

    if (!nv)
    {
        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: coherent page alloc on nvidiactl not supported\n", __FUNCTION__);
        return NV_ERR_NOT_SUPPORTED;
    }

    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    dev = nvl->dev;

    gfp_mask = nv_compute_gfp_mask(nv, at);

    virt_addr = (unsigned long)dma_alloc_coherent(dev,
                                                  at->num_pages * PAGE_SIZE,
                                                  &bus_addr,
                                                  gfp_mask);
    if (!virt_addr)
    {
        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
        return NV_ERR_NO_MEMORY;
    }

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        page_ptr->virt_addr = virt_addr + i * PAGE_SIZE;
        page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr);
        page_ptr->dma_addr  = bus_addr + i * PAGE_SIZE;
    }

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_UNCACHED);
    }

    at->flags.coherent = NV_TRUE;
    return NV_OK;
}

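// Free a coherent allocation made by nv_alloc_coherent_pages(), restoring
// write-back caching first if the pages were mapped uncached.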
static void nv_free_coherent_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    struct device *dev = at->dev;

    page_ptr = at->page_table[0];

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_WRITEBACK);
    }

    dma_free_coherent(dev, at->num_pages * PAGE_SIZE,
                      (void *)page_ptr->virt_addr, page_ptr->dma_addr);
}

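//
// Allocate a physically contiguous range of system pages and record each
// page's physical, kernel virtual, and DMA address in the page table.
// Xen dom0 and unencrypted allocations are redirected to
// nv_alloc_coherent_pages().
//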
NV_STATUS nv_alloc_contig_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    NV_STATUS status;
    nvidia_pte_t *page_ptr;
    NvU32 i, j;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    NvU64 phys_addr;
    struct device *dev = at->dev;

    nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    // TODO: This is a temporary WAR, and will be removed after fixing bug 200732409.
    if (os_is_xen_dom0() || at->flags.unencrypted)
        return nv_alloc_coherent_pages(nv, at);

    at->order = get_order(at->num_pages * PAGE_SIZE);
    gfp_mask = nv_compute_gfp_mask(nv, at);

    if (at->flags.node)
    {
        NV_ALLOC_PAGES_NODE(virt_addr, at->node_id, at->order, gfp_mask);
    }
    else
    {
        NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
    }
    if (virt_addr == 0)
    {
        if (os_is_vgx_hyper())
        {
            nv_printf(NV_DBG_MEMINFO,
                "NVRM: VM: %s: failed to allocate memory, trying coherent memory\n", __FUNCTION__);

            status = nv_alloc_coherent_pages(nv, at);
            return status;
        }

        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
        return NV_ERR_NO_MEMORY;
    }
#if !defined(__GFP_ZERO)
    if (at->flags.zeroed)
        memset((void *)virt_addr, 0, (at->num_pages * PAGE_SIZE));
#endif

    for (i = 0; i < at->num_pages; i++, virt_addr += PAGE_SIZE)
    {
        phys_addr = nv_get_kern_phys_address(virt_addr);
        if (phys_addr == 0)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: %s: failed to look up physical address\n",
                __FUNCTION__);
            status = NV_ERR_OPERATING_SYSTEM;
            goto failed;
        }

        page_ptr = at->page_table[i];
        page_ptr->phys_addr = phys_addr;
        page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
        page_ptr->virt_addr = virt_addr;
        page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);

        NV_MAYBE_RESERVE_PAGE(page_ptr);
    }

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_UNCACHED);
    }

    at->flags.coherent = NV_FALSE;

    return NV_OK;

failed:
    if (i > 0)
    {
        for (j = 0; j < i; j++)
            NV_MAYBE_UNRESERVE_PAGE(at->page_table[j]);
    }

    page_ptr = at->page_table[0];
    NV_FREE_PAGES(page_ptr->virt_addr, at->order);

    return status;
}

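// Free a contiguous allocation made by nv_alloc_contig_pages(), restoring
// write-back caching and warning if any page's reference count has changed.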
void nv_free_contig_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    unsigned int i;

    nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    if (at->flags.coherent)
        return nv_free_coherent_pages(at);

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_WRITEBACK);
    }

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
        {
            static int count = 0;
            if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: VM: %s: page count != initial page count (%u,%u)\n",
                    __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
                    page_ptr->page_count);
            }
        }
        NV_MAYBE_UNRESERVE_PAGE(page_ptr);
    }

    page_ptr = at->page_table[0];

    NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}

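//
// Allocate (potentially non-contiguous) system pages, one allocation unit at
// a time. The unit is a single OS page unless at->order is non-zero (EGM),
// in which case each unit spans multiple OS pages that are recorded
// individually in the page table.
//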
NV_STATUS nv_alloc_system_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    NV_STATUS status;
    nvidia_pte_t *page_ptr;
    NvU32 i, j;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    NvU64 phys_addr;
    struct device *dev = at->dev;
    dma_addr_t bus_addr;

    // Order should be zero except for EGM allocations.
    unsigned int alloc_page_size = PAGE_SIZE << at->order;
    unsigned int alloc_page_shift = BIT_IDX_32(alloc_page_size);
    unsigned int alloc_num_pages = NV_CEIL(at->num_pages * PAGE_SIZE, alloc_page_size);

    unsigned int sub_page_idx;
    unsigned int sub_page_offset;
    unsigned int os_pages_in_page = alloc_page_size / PAGE_SIZE;

    nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: %u order0 pages, %u order\n", __FUNCTION__, at->num_pages, at->order);

    gfp_mask = nv_compute_gfp_mask(nv, at);

    for (i = 0; i < alloc_num_pages; i++)
    {
        if (at->flags.unencrypted && (dev != NULL))
        {
            virt_addr = (unsigned long)dma_alloc_coherent(dev,
                                                          alloc_page_size,
                                                          &bus_addr,
                                                          gfp_mask);
            at->flags.coherent = NV_TRUE;
        }
        else if (at->flags.node)
        {
            NV_ALLOC_PAGES_NODE(virt_addr, at->node_id, at->order, gfp_mask);
        }
        else
        {
            NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
        }

        if (virt_addr == 0)
        {
            nv_printf(NV_DBG_MEMINFO,
                "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
            status = NV_ERR_NO_MEMORY;
            goto failed;
        }
#if !defined(__GFP_ZERO)
        if (at->flags.zeroed)
            memset((void *)virt_addr, 0, alloc_page_size);
#endif

        sub_page_offset = 0;
        for (sub_page_idx = 0; sub_page_idx < os_pages_in_page; sub_page_idx++)
        {
            unsigned long sub_page_virt_addr = virt_addr + sub_page_offset;
            phys_addr = nv_get_kern_phys_address(sub_page_virt_addr);
            if (phys_addr == 0)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: VM: %s: failed to look up physical address\n",
                    __FUNCTION__);
                // Free the entire allocation unit from its base address.
                NV_FREE_PAGES(virt_addr, at->order);
                status = NV_ERR_OPERATING_SYSTEM;
                goto failed;
            }

#if defined(_PAGE_NX)
            if (((_PAGE_NX & pgprot_val(PAGE_KERNEL)) != 0) &&
                    (phys_addr < 0x400000))
            {
                nv_printf(NV_DBG_SETUP,
                    "NVRM: VM: %s: discarding page @ 0x%llx\n",
                    __FUNCTION__, phys_addr);
                --i;
                continue;
            }
#endif

            page_ptr = at->page_table[(i * os_pages_in_page) + sub_page_idx];
            page_ptr->phys_addr = phys_addr;
            page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
            page_ptr->virt_addr = sub_page_virt_addr;

            //
            // Use unencrypted dma_addr returned by dma_alloc_coherent() as
            // nv_phys_to_dma() returns encrypted dma_addr when AMD SEV is enabled.
            //
            if (at->flags.coherent)
                page_ptr->dma_addr = bus_addr;
            else if (dev != NULL)
                page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
            else
                page_ptr->dma_addr = page_ptr->phys_addr;

            NV_MAYBE_RESERVE_PAGE(page_ptr);
            sub_page_offset += PAGE_SIZE;
        }
    }

    if (at->cache_type != NV_MEMORY_CACHED)
        nv_set_memory_type(at, NV_MEMORY_UNCACHED);

    return NV_OK;

failed:
    if (i > 0)
    {
        for (j = 0; j < i; j++)
        {
            page_ptr = at->page_table[j * os_pages_in_page];
            NV_MAYBE_UNRESERVE_PAGE(page_ptr);
            if (at->flags.coherent)
            {
                dma_free_coherent(dev, alloc_page_size, (void *)page_ptr->virt_addr,
                                  page_ptr->dma_addr);
            }
            else
            {
                NV_FREE_PAGES(page_ptr->virt_addr, at->order);
            }
        }
    }

    return status;
}

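// Free an allocation made by nv_alloc_system_pages(), restoring write-back
// caching and warning if any page's reference count has changed.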
void nv_free_system_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    unsigned int i;
    struct device *dev = at->dev;

    // Order should be zero except for EGM allocations.
    unsigned int alloc_page_size = PAGE_SIZE << at->order;
    unsigned int alloc_page_shift = BIT_IDX_32(alloc_page_size);
    unsigned int alloc_num_pages = NV_CEIL(at->num_pages * PAGE_SIZE, alloc_page_size);
    unsigned int os_pages_in_page = alloc_page_size / PAGE_SIZE;

    nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    if (at->cache_type != NV_MEMORY_CACHED)
        nv_set_memory_type(at, NV_MEMORY_WRITEBACK);

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
        {
            static int count = 0;
            if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: VM: %s: page count != initial page count (%u,%u)\n",
                    __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
                    page_ptr->page_count);
            }
        }

        NV_MAYBE_UNRESERVE_PAGE(page_ptr);
    }

    for (i = 0; i < at->num_pages; i += os_pages_in_page)
    {
        page_ptr = at->page_table[i];

        if (at->flags.coherent)
        {
            dma_free_coherent(dev, alloc_page_size, (void *)page_ptr->virt_addr,
                              page_ptr->dma_addr);
        }
        else
        {
            NV_FREE_PAGES(page_ptr->virt_addr, at->order);
        }
    }
}

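// Map an array of pages into a contiguous kernel virtual address range.
// Requires a sleepable context; returns 0 on failure.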
NvUPtr nv_vm_map_pages(
    struct page **pages,
    NvU32 count,
    NvBool cached,
    NvBool unencrypted
)
{
    NvUPtr virt_addr = 0;

    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: %s: can't map %d pages, invalid context!\n",
                  __FUNCTION__, count);
        os_dbg_breakpoint();
        return virt_addr;
    }

    virt_addr = nv_vmap(pages, count, cached, unencrypted);
    return virt_addr;
}

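// Unmap a kernel virtual address range created by nv_vm_map_pages().
// Requires a sleepable context.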
void nv_vm_unmap_pages(
    NvUPtr virt_addr,
    NvU32 count
)
{
    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: %s: can't unmap %d pages at 0x%0llx, "
                  "invalid context!\n", __FUNCTION__, count, virt_addr);
        os_dbg_breakpoint();
        return;
    }

    nv_vunmap(virt_addr, count);
}