xref: /linux/drivers/xen/mem-reservation.c (revision d99bb72a)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /******************************************************************************
4  * Xen memory reservation utilities.
5  *
6  * Copyright (c) 2003, B Dragovic
7  * Copyright (c) 2003-2004, M Williamson, K Fraser
8  * Copyright (c) 2005 Dan M. Smith, IBM Corporation
9  * Copyright (c) 2010 Daniel Kiper
10  * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
11  */
12 
13 #include <asm/xen/hypercall.h>
14 
15 #include <xen/interface/memory.h>
16 #include <xen/mem-reservation.h>
17 #include <linux/moduleparam.h>
18 
19 bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
20 core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
21 
/*
 * Use one extent per PAGE_SIZE to avoid breaking down the page into
 * multiple frames.
 */
26 #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
27 
28 #ifdef CONFIG_XEN_HAVE_PVMMU
/**
 * __xenmem_reservation_va_mapping_update - restore p2m and kernel VA
 * mappings for pages just populated by the hypervisor.
 * @count:  number of entries in @pages and @frames
 * @pages:  pages backing the reservation
 * @frames: machine frames to map, one per page
 *
 * PV MMU only: updates the phys-to-machine table and the kernel linear
 * mapping for each page.  Any hypervisor failure is fatal (BUG_ON).
 */
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames)
{
	unsigned long i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn;
		int ret;

		/* Validate the page pointer before deriving its PFN. */
		BUG_ON(!page);
		pfn = page_to_pfn(page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		set_phys_to_machine(pfn, frames[i]);

		ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frames[i], PAGE_KERNEL), 0);
		BUG_ON(ret);
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
57 
__xenmem_reservation_va_mapping_reset(unsigned long count,struct page ** pages)58 void __xenmem_reservation_va_mapping_reset(unsigned long count,
59 					   struct page **pages)
60 {
61 	int i;
62 
63 	for (i = 0; i < count; i++) {
64 		struct page *page = pages[i];
65 		unsigned long pfn = page_to_pfn(page);
66 		int ret;
67 
68 		/*
69 		 * We don't support PV MMU when Linux and Xen are using
70 		 * different page granularity.
71 		 */
72 		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
73 
74 		ret = HYPERVISOR_update_va_mapping(
75 				(unsigned long)__va(pfn << PAGE_SHIFT),
76 				__pte_ma(0), 0);
77 		BUG_ON(ret);
78 
79 		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
80 	}
81 }
82 EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
83 #endif /* CONFIG_XEN_HAVE_PVMMU */
84 
85 /* @frames is an array of PFNs */
xenmem_reservation_increase(int count,xen_pfn_t * frames)86 int xenmem_reservation_increase(int count, xen_pfn_t *frames)
87 {
88 	struct xen_memory_reservation reservation = {
89 		.address_bits = 0,
90 		.extent_order = EXTENT_ORDER,
91 		.domid        = DOMID_SELF
92 	};
93 
94 	/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
95 	set_xen_guest_handle(reservation.extent_start, frames);
96 	reservation.nr_extents = count;
97 	return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
98 }
99 EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
100 
101 /* @frames is an array of GFNs */
xenmem_reservation_decrease(int count,xen_pfn_t * frames)102 int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
103 {
104 	struct xen_memory_reservation reservation = {
105 		.address_bits = 0,
106 		.extent_order = EXTENT_ORDER,
107 		.domid        = DOMID_SELF
108 	};
109 
110 	/* XENMEM_decrease_reservation requires a GFN */
111 	set_xen_guest_handle(reservation.extent_start, frames);
112 	reservation.nr_extents = count;
113 	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
114 }
115 EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
116