xref: /linux/arch/m68k/include/asm/page_mm.h (revision 44f57d78)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_PAGE_MM_H
#define _M68K_PAGE_MM_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/module.h>

/*
 * We don't need to check for alignment etc.: these helpers operate on
 * whole page frames, which are always page-aligned and therefore already
 * satisfy the 16-byte alignment that move16 requires.
 */
#ifdef CPU_M68040_OR_M68060_ONLY
static inline void copy_page(void *to, void *from)
{
	unsigned long tmp;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %1@+,%0@+\n\t"
			     "move16 %1@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "dbra  %2,1b\n\t"
			     : "=a" (to), "=a" (from), "=d" (tmp)
			     : "0" (to), "1" (from), "2" (PAGE_SIZE / 32 - 1)
			     );
}

static inline void clear_page(void *page)
{
	unsigned long tmp;
	unsigned long *sp = page;

	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %2@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "subqw  #8,%2\n\t"
			     "subqw  #8,%2\n\t"
			     "dbra   %1,1b\n\t"
			     : "=a" (sp), "=d" (tmp)
			     : "a" (page), "0" (sp),
			       "1" ((PAGE_SIZE - 16) / 16 - 1));
}

#else
#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)
#endif
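
/*
 * A rough sketch of the arithmetic behind the move16 loops above, assuming
 * the 4 KiB PAGE_SIZE used on non-Sun3 m68k:
 *
 *   copy_page():  dbra runs PAGE_SIZE / 32 = 128 iterations (the -1
 *                 compensates for dbra counting down to -1), each moving
 *                 2 * 16 bytes, so 128 * 32 = 4096 bytes.
 *   clear_page(): the four '*sp++ = 0' stores zero the first 16 bytes;
 *                 the loop then replicates that block (the two subqw #8
 *                 rewind the source back to 'page') across the remaining
 *                 (PAGE_SIZE - 16) / 16 = 255 blocks of 16 bytes.
 */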

#define clear_user_page(addr, vaddr, page)	\
	do {	clear_page(addr);		\
		flush_dcache_page(page);	\
	} while (0)
#define copy_user_page(to, from, vaddr, page)	\
	do {	copy_page(to, from);		\
		flush_dcache_page(page);	\
	} while (0)

extern unsigned long m68k_memoffset;

#ifndef CONFIG_SUN3

#define WANT_PAGE_VIRTUAL

static inline unsigned long ___pa(void *vaddr)
{
	unsigned long paddr;
	asm (
		"1:	addl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (paddr)
		: "0" (vaddr), "i" (m68k_fixup_memoffset));
	return paddr;
}
#define __pa(vaddr)	___pa((void *)(long)(vaddr))
static inline void *__va(unsigned long paddr)
{
	void *vaddr;
	asm (
		"1:	subl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (vaddr)
		: "0" (paddr), "i" (m68k_fixup_memoffset));
	return vaddr;
}
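
/*
 * A rough reading of the fixup trick above (an interpretation, not a
 * comment from the original header): the '#0' immediate lives two bytes
 * past label 1:, and m68k_fixup() records that address so early startup
 * code can patch in the real virtual<->physical offset, m68k_memoffset.
 * After patching, __pa() behaves like 'vaddr + m68k_memoffset' and
 * __va() like 'paddr - m68k_memoffset', with no memory load at run time.
 */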

#else	/* !CONFIG_SUN3 */
/* This #define is a horrible hack to suppress lots of warnings. --m */
#define __pa(x) ___pa((unsigned long)(x))
static inline unsigned long ___pa(unsigned long x)
{
	if (x == 0)
		return 0;
	if (x >= PAGE_OFFSET)
		return x - PAGE_OFFSET;
	else
		return x + 0x2000000;
}

static inline void *__va(unsigned long x)
{
	if (x == 0)
		return (void *)0;

	if (x < 0x2000000)
		return (void *)(x + PAGE_OFFSET);
	else
		return (void *)(x - 0x2000000);
}
#endif	/* CONFIG_SUN3 */
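
/*
 * A worked example of the Sun3 variant above, with purely illustrative
 * numbers: for a virtual address PAGE_OFFSET + 0x1000, ___pa() returns
 * 0x1000, and __va(0x1000) maps it straight back to PAGE_OFFSET + 0x1000.
 * Addresses falling outside that first 0x2000000 (32 MiB) window are
 * shifted by 0x2000000 instead of by PAGE_OFFSET.
 */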

/*
 * NOTE: "virtual" isn't really correct here; strictly it should be the
 * offset into the memory node.  But since we have no highmem, this works
 * for now.
 * TODO: implement (fast) pfn<->pgdat_idx conversion functions; this would
 * make a lot of the shifts unnecessary.
 */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
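
/*
 * A minimal sketch of the conversion above, assuming the non-Sun3
 * PAGE_SHIFT of 12 (4 KiB pages) and purely illustrative addresses:
 * a kernel address whose __pa() is 0x00345000 has pfn 0x345, and
 * pfn_to_virt(0x345) feeds 0x00345000 back into __va().
 */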

extern int m68k_virt_to_node_shift;

#ifdef CONFIG_SINGLE_MEMORY_CHUNK
#define __virt_to_node(addr)	(&pg_data_map[0])
#else
extern struct pglist_data *pg_data_table[];

static inline __attribute_const__ int __virt_to_node_shift(void)
{
	int shift;

	asm (
		"1:	moveq	#0,%0\n"
		m68k_fixup(%c1, 1b)
		: "=d" (shift)
		: "i" (m68k_fixup_vnode_shift));
	return shift;
}

#define __virt_to_node(addr)	(pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
#endif

#define virt_to_page(addr) ({						\
	pfn_to_page(virt_to_pfn(addr));					\
})
#define page_to_virt(page) ({						\
	pfn_to_virt(page_to_pfn(page));					\
})

#define pfn_to_page(pfn) ({						\
	unsigned long __pfn = (pfn);					\
	struct pglist_data *pgdat;					\
	pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn));	\
	pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn);		\
})
#define page_to_pfn(_page) ({						\
	const struct page *__p = (_page);				\
	struct pglist_data *pgdat;					\
	pgdat = &pg_data_map[page_to_nid(__p)];				\
	((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn;		\
})
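
/*
 * A minimal usage sketch, not part of the original header: a hypothetical
 * helper (guarded out so it is never built) showing how the node-aware
 * macros above are expected to round-trip for a valid kernel address.
 */
#if 0
static inline int page_mm_roundtrip_ok(void *kaddr)
{
	unsigned long pfn = virt_to_pfn(kaddr);
	struct page *page = pfn_to_page(pfn);

	/*
	 * page_to_pfn() should recover the pfn, and page_to_virt() the
	 * page-aligned kernel address we started from.
	 */
	return page_to_pfn(page) == pfn &&
	       page_to_virt(page) == (void *)((unsigned long)kaddr & PAGE_MASK);
}
#endif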

#define virt_addr_valid(kaddr)	((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
#define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))

#endif /* __ASSEMBLY__ */

#endif /* _M68K_PAGE_MM_H */