/*	$OpenBSD: pmap.h,v 1.56 2023/12/11 22:12:53 kettenis Exp $	*/

/*
 * Copyright (c) 2002-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <uvm/uvm_object.h>
#include <sys/mutex.h>

#ifdef	_KERNEL
#include <sys/systm.h>
#include <machine/pte.h>

struct pmap {
	struct mutex pm_mtx;
	struct uvm_object pm_obj;
	struct vm_page	*pm_ptphint;
	struct vm_page	*pm_pdir_pg;	/* vm_page for pdir */
	volatile u_int32_t *pm_pdir;	/* page dir (read-only after create) */
	pa_space_t	pm_space;	/* space id (read-only after create) */
	u_int		pm_pid;		/* prot id (read-only after create) */

	struct pmap_statistics	pm_stats;
};
typedef struct pmap *pmap_t;

#define HPPA_MAX_PID    0xfffa
#define	HPPA_SID_MAX	0x7ffd
#define HPPA_SID_KERNEL 0
#define HPPA_PID_KERNEL 2

#define KERNEL_ACCESS_ID 1
#define KERNEL_TEXT_PROT (TLB_AR_KRX | (KERNEL_ACCESS_ID << 1))
#define KERNEL_DATA_PROT (TLB_AR_KRW | (KERNEL_ACCESS_ID << 1))

struct pv_entry {			/* locked by its list's pvh_mtx */
	struct pv_entry	*pv_next;
	struct pmap	*pv_pmap;	/* the pmap */
	vaddr_t		pv_va;		/* the virtual address */
	struct vm_page	*pv_ptp;	/* the vm_page of the PTP */
};

/* also match the hardware tlb walker definition */
struct vp_entry {
	u_int	vp_tag;
	u_int	vp_tlbprot;
	u_int	vp_tlbpage;
	u_int	vp_ptr;
};
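
/*
 * Presumably, on machines with a PDC hardware TLB walker (the
 * HP7100LC/HP7300LC case below), the hashed page table entries must use
 * exactly this tag/tlbprot/tlbpage/pointer layout; hence the "match the
 * hardware tlb walker definition" requirement above.
 */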

extern void gateway_page(void);
extern struct pmap kernel_pmap_store;

#if defined(HP7100LC_CPU) || defined(HP7300LC_CPU)
extern int pmap_hptsize;
extern struct pdc_hwtlb pdc_hwtlb;
#endif

/*
 * pool quickmaps
 */
#define	pmap_map_direct(pg)	((vaddr_t)VM_PAGE_TO_PHYS(pg))
struct vm_page *pmap_unmap_direct(vaddr_t);
#define	__HAVE_PMAP_DIRECT
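
/*
 * Since the hppa kernel has its memory mapped at the physical address,
 * pmap_map_direct() can return VM_PAGE_TO_PHYS() directly and no
 * temporary mapping is needed; pmap_unmap_direct() presumably just
 * translates the address back to its vm_page (see pmap.c).
 */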

/*
 * According to the PA-RISC manual, aliased VAs may differ in their
 * high 12 bits only, i.e. they must agree modulo HPPA_PGALIAS.
 */
#define	PMAP_PREFER
/* pmap prefer alignment */
#define PMAP_PREFER_ALIGN()	(HPPA_PGALIAS)
/* pmap prefer offset within alignment */
#define PMAP_PREFER_OFFSET(of)	((of) & HPPA_PGAOFF)
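
/*
 * Rough sketch (assumption, not part of this header): uvm uses the two
 * hooks above to bias mapping addresses so that virtual aliases line up,
 * approximately
 *
 *	va = (hint & ~(PMAP_PREFER_ALIGN() - 1)) + PMAP_PREFER_OFFSET(off);
 *
 * where "hint" and "off" are placeholders for the caller's address hint
 * and object offset, making va agree with off in the low HPPA_PGAOFF bits.
 */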

#define	pmap_sid2pid(s)			(((s) + 1) << 1)
#define pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_update(pm)			(void)(pm)
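
/*
 * For example, the kernel space id translates to the kernel protection id:
 * pmap_sid2pid(HPPA_SID_KERNEL) == ((0 + 1) << 1) == 2 == HPPA_PID_KERNEL.
 */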

#define	PG_PMAP_MOD		PG_PMAP0	/* modified */
#define	PG_PMAP_REF		PG_PMAP1	/* referenced */

#define pmap_clear_modify(pg)	pmap_changebit(pg, 0, PTE_PROT(TLB_DIRTY))
#define pmap_clear_reference(pg) pmap_changebit(pg, PTE_PROT(TLB_REFTRAP), 0)
#define pmap_is_modified(pg)	pmap_testbit(pg, PG_PMAP_MOD)
#define pmap_is_referenced(pg)	pmap_testbit(pg, PG_PMAP_REF)
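
/*
 * Judging from the macros above: pmap_changebit(pg, set, clear) presumably
 * applies the given PTE bits to every mapping of the page, so clearing the
 * modified state clears TLB_DIRTY, while clearing the referenced state sets
 * TLB_REFTRAP so the next access traps and is recorded again; pmap_testbit()
 * checks the PG_PMAP_* page flags.
 */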

#define pmap_init_percpu()		do { /* nothing */ } while (0)
#define pmap_unuse_final(p)		/* nothing */
#define	pmap_remove_holes(vm)		do { /* nothing */ } while (0)

void pmap_bootstrap(vaddr_t);
boolean_t pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t);
boolean_t pmap_testbit(struct vm_page *, int);
void pmap_page_write_protect(struct vm_page *);
void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva);
void pmap_page_remove(struct vm_page *pg);

static __inline int
pmap_prot(struct pmap *pmap, int prot)
{
	extern u_int hppa_prot[];
	return (hppa_prot[prot] | (pmap == pmap_kernel()? 0 : TLB_USER));
}
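
/*
 * Illustrative example: pmap_prot(pmap_kernel(), PROT_READ) is just
 * hppa_prot[PROT_READ], while the same call on a user pmap also ORs in
 * TLB_USER.
 */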

static __inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if (prot == PROT_READ) {
		pmap_page_write_protect(pg);
	} else {
		KASSERT(prot == PROT_NONE);
		pmap_page_remove(pg);
	}
}

static __inline void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if (prot != PROT_NONE)
		pmap_write_protect(pmap, sva, eva, prot);
	else
		pmap_remove(pmap, sva, eva);
}

#endif /* _KERNEL */

#if !defined(_LOCORE)
struct pv_entry;
struct vm_page_md {
	struct mutex pvh_mtx;
	struct pv_entry	*pvh_list;	/* head of list (locked by pvh_mtx) */
};

#define	VM_MDPAGE_INIT(pg) do {				\
	mtx_init(&(pg)->mdpage.pvh_mtx, IPL_VM);	\
	(pg)->mdpage.pvh_list = NULL;			\
} while (0)
#endif

#endif /* _MACHINE_PMAP_H_ */