/*	$NetBSD: pmap.h,v 1.10 2001/09/10 21:19:33 chris Exp $	*/

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_NEWS68K_PMAP_H_
#define	_NEWS68K_PMAP_H_

#include <machine/cpu.h>
#include <machine/pte.h>

/*
 * Pmap stuff
 */
struct pmap {
	pt_entry_t		*pm_ptab;	/* KVA of page table */
	st_entry_t		*pm_stab;	/* KVA of segment table */
	int			pm_stfree;	/* 040: free lev2 blocks */
	st_entry_t		*pm_stpa;	/* 040: ST phys addr */
	short			pm_sref;	/* segment table ref count */
	short			pm_count;	/* pmap reference count */
	struct simplelock	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
};

typedef struct pmap	*pmap_t;

/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
 * for the kernel and users.  8 implies only the initial "segment table"
 * page is used.  WARNING: don't change MAXUL2SIZE unless you can allocate
 * physically contiguous pages for the ST in pmap.c!
 */
#define	MAXKL2SIZE	32
#define	MAXUL2SIZE	8
#define	l2tobm(n)	(1 << (n))
#define	bmtol2(n)	(ffs(n) - 1)
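
/*
 * A minimal sketch (an illustration, not code from this tree) of how
 * these two macros cooperate with pm_stfree: bmtol2() yields the lowest
 * free block number (ffs() returns 0 on an empty mask, hence -1 when
 * nothing is free) and l2tobm() builds the bit to clear once that block
 * is taken.  SG4_LEV2SIZE (descriptors per level 2 block) and the
 * locals ix/ste are assumptions for the example:
 *
 *	st_entry_t *ste;
 *	int ix;
 *
 *	ix = bmtol2(pmap->pm_stfree);
 *	if (ix == -1)
 *		panic("pmap: out of level 2 blocks");
 *	pmap->pm_stfree &= ~l2tobm(ix);
 *	ste = &pmap->pm_stab[ix * SG4_LEV2SIZE];
 */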

/*
 * Macros for speed
 */
#define	PMAP_ACTIVATE(pmap, loadhw)					\
{									\
	if ((loadhw))							\
		loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa));		\
}

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry, the list is pv_table.
 */
struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	st_entry_t	*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
};

#define	PV_CI		0x01	/* header: all entries are cache inhibited */
#define	PV_PTPAGE	0x02	/* header: entry maps a page table page */

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 *	((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 * which, with 4KB pages and 4-byte pointers and ints, works out to
 * (4096 - 16) / 24 = 170.
 */
#define	NPVPPG	170

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};

extern struct pmap	kernel_pmap_store;

#define	pmap_kernel()	(&kernel_pmap_store)
#define	active_pmap(pm) \
	((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
#define	active_user_pmap(pm) \
	(curproc && \
	 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)

extern struct pv_entry	*pv_table;	/* array of entries, one per page */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_update(pmap)		/* nothing (yet) */

extern pt_entry_t	*Sysmap;
extern char		*vmmap;		/* map for mem, dumps, etc. */

vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
void	pmap_procwr __P((struct proc *, vaddr_t, u_long));
#define	PMAP_NEED_PROCWR

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
boolean_t pmap_zero_page_uncached __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_zero_page_uncached(pa)

#ifdef CACHE_HAVE_VAC
void	pmap_prefer __P((vaddr_t, vaddr_t *));
#define	PMAP_PREFER(foff, vap)	pmap_prefer((foff), (vap))
#endif

#endif /* !_NEWS68K_PMAP_H_ */