xref: /original-bsd/sys/luna68k/include/pmap.h (revision 3705696b)
/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1992 OMRON Corporation.
 * Copyright (c) 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: hp300/include/pmap.h	7.11 (Berkeley) 12/27/92
 *
 *	@(#)pmap.h	8.1 (Berkeley) 06/10/93
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_

#define LUNA_PAGE_SIZE	NBPG
#if defined(LUNA2)
#define LUNA_SEG_SIZE	(mmutype == MMU_68040 ? 0x40000 : NBSEG)
#else
#define LUNA_SEG_SIZE	NBSEG
#endif

#define luna_trunc_seg(x)	(((unsigned)(x)) & ~(LUNA_SEG_SIZE-1))
#define luna_round_seg(x)	luna_trunc_seg((unsigned)(x) + LUNA_SEG_SIZE-1)
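
/*
 * An illustrative sketch: with the 68040 segment size of 0x40000 used
 * above,
 *
 *	luna_trunc_seg(0x5abcd) == 0x40000	rounds down to a segment boundary
 *	luna_round_seg(0x5abcd) == 0x80000	rounds up to the next boundary
 *
 * i.e. luna_round_seg() just adds LUNA_SEG_SIZE-1 before truncating.
 */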

/*
 * Pmap stuff
 */
struct pmap {
	struct pte		*pm_ptab;	/* KVA of page table */
	struct ste		*pm_stab;	/* KVA of segment table */
	int			pm_stchanged;	/* ST changed */
	int			pm_stfree;	/* 040: free lev2 blocks */
	struct ste		*pm_stpa;	/* 040: ST phys addr */
	short			pm_sref;	/* segment table ref count */
	short			pm_count;	/* pmap reference count */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
};

typedef struct pmap	*pmap_t;

extern struct pmap	kernel_pmap_store;

#define kernel_pmap	(&kernel_pmap_store)
#define	active_pmap(pm) \
	((pm) == kernel_pmap || (pm) == curproc->p_vmspace->vm_map.pmap)
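
/*
 * Possible use (a sketch, assuming a TBIS()-style single-entry TLB
 * flush as in the hp300 pmap): pmap internals can test active_pmap()
 * to see whether a change touches the currently loaded address space:
 *
 *	if (active_pmap(pmap))
 *		TBIS(va);
 */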

/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
 * for the kernel and users.  8 implies only the initial "segment table"
 * page is used.  WARNING: don't change MAXUL2SIZE unless you can allocate
 * physically contiguous pages for the ST in pmap.c!
 */
#define	MAXKL2SIZE	16
#define MAXUL2SIZE	8
#define l2tobm(n)	(1 << (n))
#define	bmtol2(n)	(ffs(n) - 1)
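
/*
 * A sketch of how these fit together (assumed usage, following the
 * description above): a set bit in pm_stfree means the corresponding
 * level 2 block is free, so grabbing the lowest free one could read
 *
 *	int ix;
 *
 *	if (pmap->pm_stfree) {
 *		ix = bmtol2(pmap->pm_stfree);	pick lowest free block
 *		pmap->pm_stfree &= ~l2tobm(ix);	mark it allocated
 *	}
 *
 * The "- 1" in bmtol2() accounts for ffs() numbering the LSB as bit 1.
 */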

/*
 * Macros for speed
 */
#define PMAP_ACTIVATE(pmapp, pcbp, iscurproc) \
	if ((pmapp) != NULL && (pmapp)->pm_stchanged) { \
		(pcbp)->pcb_ustp = luna_btop((vm_offset_t)(pmapp)->pm_stpa); \
		if (iscurproc) \
			loadustp((pcbp)->pcb_ustp); \
		(pmapp)->pm_stchanged = FALSE; \
	}
#define PMAP_DEACTIVATE(pmapp, pcbp)
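
/*
 * A sketch of an assumed call site (4.4BSD-style proc/vmspace fields):
 * when a process whose segment table has changed is switched in, the
 * new user segment table pointer is pushed into its PCB and, if it is
 * the running process, loaded into the MMU:
 *
 *	PMAP_ACTIVATE(&p->p_vmspace->vm_pmap, &p->p_addr->u_pcb,
 *	    p == curproc);
 */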

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	struct ste	*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
} *pv_entry_t;

#define	PV_CI		0x01	/* header: all entries are cache inhibited */
#define PV_PTPAGE	0x02	/* header: entry maps a page table page */

#ifdef	KERNEL

pv_entry_t	pv_table;		/* array of entries, one per page */

#define pa_index(pa)		atop((pa) - vm_first_phys)
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
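
/*
 * A sketch of an assumed traversal (the hp300 pmap, which this file
 * derives from, leaves pv_pmap NULL in the header when a page has no
 * mappings): walking every mapping of a managed physical address pa
 * looks roughly like
 *
 *	pv_entry_t pv = pa_to_pvh(pa);
 *
 *	if (pv->pv_pmap != NULL)
 *		for (; pv != NULL; pv = pv->pv_next)
 *			use pv->pv_pmap and pv->pv_va here
 */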

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

extern	struct pte *Sysmap;
extern	char *vmmap;			/* map for mem, dumps, etc. */

/*
 * definitions for LUNA I/O space mapping
 */
struct physmap {
	int pm_phys;			/* physical base address */
	int pm_size;			/* size of the region */
	int pm_cache;			/* cacheability of the mapping */
};

#endif	/* KERNEL */

#endif	/* _PMAP_MACHINE_ */