xref: /netbsd/sys/arch/m68k/include/pmap_coldfire.h (revision ac57e623)
1 /*	$NetBSD: pmap_coldfire.h,v 1.4 2020/12/20 16:38:25 skrll Exp $	*/
2 /*-
3  * Copyright (c) 2013 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Matt Thomas of 3am Software Foundry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
#ifndef _M68K_PMAP_COLDFIRE_H_
#define _M68K_PMAP_COLDFIRE_H_	/* was missing the leading '_', so the guard never took effect */
33 
34 #ifdef _LOCORE
35 #error use assym.h instead
36 #endif
37 
38 #if defined(_MODULE)
39 #error this file should not be included by loadable kernel modules
40 #endif
41 
42 #ifdef _KERNEL_OPT
43 #include "opt_pmap.h"
44 #endif
45 
46 #include <sys/cpu.h>
47 #include <sys/kcore.h>
48 #include <uvm/uvm_page.h>
#ifdef __PMAP_PRIVATE
/*
 * NOTE(review): these powerpc includes appear to be inherited from the
 * PowerPC Booke pmap this header was derived from (see copyright note
 * above); verify they belong in an m68k/ColdFire header.
 */
#include <powerpc/booke/cpuvar.h>
#include <powerpc/cpuset.h>
#endif
53 
54 #define	PMAP_NEED_PROCWR
55 
56 #include <uvm/pmap/vmpagemd.h>
57 
58 #include <m68k/pte_coldfire.h>
59 
60 #define	NBSEG		(NBPG*NPTEPG)
61 #define	SEGSHIFT	(PGSHIFT + PGSHIFT - 2)
62 #define SEGOFSET	((1 << SEGSHIFT) - 1)
63 #define PMAP_SEGTABSIZE	(1 << (32 - SEGSHIFT))
64 #define	NPTEPG		(NBPG >> 2)
65 
66 #define	KERNEL_PID	0
67 
68 #define PMAP_TLB_MAX			  1
69 #define	PMAP_TLB_NUM_PIDS		256
70 #define	PMAP_INVALID_SEGTAB_ADDRESS	((pmap_segtab_t *)0xfeeddead)
71 
72 #define	pmap_phys_address(x)		(x)
73 
void	pmap_procwr(struct proc *, vaddr_t, size_t);
#define	PMAP_NEED_PROCWR	/* NOTE(review): duplicate of the identical define above */
76 
77 #ifdef __PMAP_PRIVATE
78 struct vm_page *
79 	pmap_md_alloc_poolpage(int flags);
80 vaddr_t	pmap_md_map_poolpage(paddr_t, vsize_t);
81 void	pmap_md_unmap_poolpage(vaddr_t, vsize_t);
82 bool	pmap_md_direct_mapped_vaddr_p(vaddr_t);
83 bool	pmap_md_io_vaddr_p(vaddr_t);
84 paddr_t	pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
85 vaddr_t	pmap_md_direct_map_paddr(paddr_t);
86 void	pmap_md_init(void);
87 
88 bool	pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);
89 
90 #ifdef PMAP_MINIMALTLB
91 vaddr_t	pmap_kvptefill(vaddr_t, vaddr_t, pt_entry_t);
92 #endif
93 #endif
94 
95 void	pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
96 vaddr_t	pmap_bootstrap(vaddr_t, vaddr_t, phys_ram_seg_t *, size_t);
97 bool	pmap_extract(struct pmap *, vaddr_t, paddr_t *);
98 
99 static inline paddr_t vtophys(vaddr_t);
100 
101 static inline paddr_t
vtophys(vaddr_t va)102 vtophys(vaddr_t va)
103 {
104 	paddr_t pa;
105 
106 	if (pmap_extract(pmap_kernel(), va, &pa))
107 		return pa;
108 	KASSERT(0);
109 	return (paddr_t) -1;
110 }
111 
112 #ifdef __PMAP_PRIVATE
/*
 * Virtual Cache Alias helper routines.  Stubbed out as no-ops below on
 * the assumption that aliases are not a problem on this port.
 * (The original comment said "Booke CPUs" — a leftover from the PowerPC
 * Booke code this file was derived from; confirm for ColdFire.)
 */
/*
 * Hook called when a new mapping of a page is added, to let the MD code
 * track potential virtual cache aliases.  Stub: always returns false,
 * i.e. the caller need take no alias-resolution action.
 */
static inline bool
pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
{
	return false;
}
121 
/*
 * Hook called when a mapping of a page is removed, to undo any alias
 * bookkeeping.  Stub: nothing to do on this port.
 */
static inline void
pmap_md_vca_remove(struct vm_page *pg, vaddr_t va)
{

}
127 
/*
 * Hook to clean/flush a page's cache lines before an alias-sensitive
 * operation (`op' selects the operation).  Stub: nothing to do here.
 */
static inline void
pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)
{
}
132 
/*
 * Return the highest TLB ASID this port supports.  With
 * PMAP_TLB_NUM_PIDS (256) PIDs, numbered from KERNEL_PID (0), the
 * maximum ASID is 255.
 */
static inline size_t
pmap_md_tlb_asid_max(void)
{
	return PMAP_TLB_NUM_PIDS - 1;
}
138 
/*
 * MD hook invoked when pmap `pm' is activated (e.g. for lwp `l').
 * Stub: no per-CPU translation-table state to switch on this port.
 */
static inline void
pmap_md_xtab_activate(struct pmap *pm, struct lwp *l)
{

	/* nothing */
}
145 
/*
 * MD hook invoked when pmap `pm' is deactivated.
 * Stub: nothing to tear down on this port.
 */
static inline void
pmap_md_xtab_deactivate(struct pmap *pm)
{

	/* nothing */
}
152 #endif
153 
154 #define	POOL_VTOPHYS(va)	((paddr_t)(vaddr_t)(va))
155 #define	POOL_PHYSTOV(pa)	((vaddr_t)(paddr_t)(pa))
156 
157 #include <uvm/pmap/pmap.h>
158 
159 #endif /* !_M68K_PMAP_COLDFIRE_H_ */
160