/* $NetBSD: xenpmap.h,v 1.44 2020/05/26 10:10:32 bouyer Exp $ */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif

#include <sys/types.h>
#include <sys/kcpuset.h>

/* Marker for an unmapped slot in the phys-to-machine (P2M) table. */
#define INVALID_P2M_ENTRY (~0UL)

/*
 * Deferred MMU operations: the xpq_queue_*() functions enqueue page
 * table updates that are submitted to the hypervisor in one batch by
 * xpq_flush_queue().
 */
void xpq_queue_machphys_update(paddr_t, paddr_t);
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pte_update(paddr_t, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t, int);
void xpq_queue_unpin_table(paddr_t);
int xpq_update_foreign(paddr_t, pt_entry_t, int, u_int);

/* TLB shootdowns on remote CPUs, carried out through the hypervisor. */
void xen_mcast_tlbflush(kcpuset_t *);
void xen_bcast_tlbflush(void);
void xen_mcast_invlpg(vaddr_t, kcpuset_t *);
void xen_bcast_invlpg(vaddr_t);

void xen_copy_page(paddr_t, paddr_t);
void xen_pagezero(paddr_t);
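
/*
 * Typical use, as a sketch rather than a literal kernel excerpt:
 * several updates are queued and then pushed in a single batch.
 * pte_ma (the PTE's machine address) and npte (the new PTE value)
 * are placeholders.
 *
 *	xpq_queue_pte_update(pte_ma, npte);
 *	xpq_queue_invlpg(va);
 *	xpq_flush_queue();
 */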

/* Suspend/resume and recursive-mapping hooks used by the Xen pmap. */
void pmap_xen_resume(void);
void pmap_xen_suspend(void);
void pmap_map_recursive_entries(void);
void pmap_unmap_recursive_entries(void);

/* Propagate an update of a kernel page-directory slot to the other pmaps. */
void xen_kpm_sync(struct pmap *, int);

#define xpq_queue_pin_l1_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE)
#define xpq_queue_pin_l2_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L2_TABLE)
#define xpq_queue_pin_l3_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L3_TABLE)
#define xpq_queue_pin_l4_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L4_TABLE)
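
/*
 * Sketch: a page-table page is pinned by its machine address before the
 * hypervisor will accept it as part of the active paging structure;
 * pdirpa is a placeholder for the table's physical address.
 *
 *	xpq_queue_pin_l4_table(xpmap_ptom_masked(pdirpa));
 *	xpq_flush_queue();
 */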

#ifdef XENPV
extern unsigned long *xpmap_phys_to_machine_mapping;

/* MD PMAP flags */
/* mmu_update with MMU_PT_UPDATE_NO_TRANSLATE */
#define PMAP_MD_XEN_NOTR 0x01000000

/* Translate a machine frame address to its physical frame address. */
static __inline paddr_t
xpmap_mtop_masked(paddr_t mpa)
{
	return (
	    (paddr_t)machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT);
}

/* As above, but preserve the offset/flag bits below the frame. */
static __inline paddr_t
xpmap_mtop(paddr_t mpa)
{
	return (xpmap_mtop_masked(mpa) | (mpa & ~PTE_4KFRAME));
}

/* Translate a physical frame address to its machine frame address. */
static __inline paddr_t
xpmap_ptom_masked(paddr_t ppa)
{
	return (
	    (paddr_t)xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT]
	    << PAGE_SHIFT);
}

/* As above, but preserve the offset/flag bits below the frame. */
static __inline paddr_t
xpmap_ptom(paddr_t ppa)
{
	return (xpmap_ptom_masked(ppa) | (ppa & ~PTE_4KFRAME));
}
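
/*
 * Sketch: PTEs handed to the hypervisor must hold machine frames, so a
 * physical address is translated before protection bits are or'ed in;
 * pa is a placeholder and PTE_P/PTE_W are the usual x86 PTE bits.
 *
 *	pt_entry_t npte = xpmap_ptom_masked(pa) | PTE_P | PTE_W;
 */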

/* Record the machine frame backing a physical page in the P2M table. */
static __inline void
xpmap_ptom_map(paddr_t ppa, paddr_t mpa)
{
	xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT] = mpa >> PAGE_SHIFT;
}

/* Mark a physical page as having no machine frame. */
static __inline void
xpmap_ptom_unmap(paddr_t ppa)
{
	xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT] = INVALID_P2M_ENTRY;
}

/* True if the physical page currently has a machine frame. */
static __inline bool
xpmap_ptom_isvalid(paddr_t ppa)
{
	return (
	    xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT]
	    != INVALID_P2M_ENTRY);
}
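
/*
 * Sketch (placeholder names): when a machine frame mfn is assigned to a
 * physical page pa, record it before use and clear it when the frame is
 * given back.
 *
 *	xpmap_ptom_map(pa, (paddr_t)mfn << PAGE_SHIFT);
 *	...
 *	xpmap_ptom_unmap(pa);
 */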

/*
 * Build a multicall entry for update_va_mapping.  On 32-bit the 64-bit
 * PTE value is split across two argument slots, which shifts the
 * position of the remaining arguments.
 */
static inline void
MULTI_update_va_mapping(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
#else
	mcl->args[1] = (new_val & 0xffffffff);
	mcl->args[2] = (new_val >> 32);
	mcl->args[3] = flags;
#endif
}
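
/*
 * Sketch of use in a multicall batch; mcl, va, npte and n are
 * placeholders, and UVMF_* are the hypervisor's update_va_mapping
 * flags.
 *
 *	multicall_entry_t mcl[N];
 *	MULTI_update_va_mapping(&mcl[0], va, npte, UVMF_INVLPG | UVMF_LOCAL);
 *	...
 *	HYPERVISOR_multicall(mcl, n);
 */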

/* As above, but the mapped page may belong to the domain domid. */
static inline void
MULTI_update_va_mapping_otherdomain(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags, domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
	mcl->args[3] = domid;
#else
	mcl->args[1] = (new_val & 0xffffffff);
	mcl->args[2] = (new_val >> 32);
	mcl->args[3] = flags;
	mcl->args[4] = domid;
#endif
}

/*
 * args[] indices of the flags and domid arguments in the entries built
 * above; they differ between the 64-bit and 32-bit layouts.
 */
#if defined(__x86_64__)
#define MULTI_UVMFLAGS_INDEX 2
#define MULTI_UVMDOMID_INDEX 3
#else
#define MULTI_UVMFLAGS_INDEX 3
#define MULTI_UVMDOMID_INDEX 4
#endif
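
/*
 * Sketch: the indices let callers patch an entry without repeating the
 * per-arch layout, e.g. forcing a global flush on an already built
 * entry (i is a placeholder).
 *
 *	mcl[i].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH | UVMF_ALL;
 */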

#if defined(__x86_64__)
void xen_set_user_pgd(paddr_t);
#endif
#endif /* XENPV */

/* Enter grant-table mappings (gnttab_map_grant_ref ops) into a pmap. */
int pmap_enter_gnt(struct pmap *, vaddr_t, vaddr_t, int,
    const struct gnttab_map_grant_ref *);

#endif /* _XEN_XENPMAP_H_ */