// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];

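/*
 * flush_icache_range() - write back D-cache lines and invalidate
 * I-cache lines covering [start, end) so that newly written
 * instructions become visible to the fetch unit.  The bounds are
 * first widened to whole cache lines (the D-cache line size is used
 * for the alignment); the final argument (1) asks
 * cpu_cache_wbinval_range() to invalidate the I-cache as well.
 */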
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long line_size, flags;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & ~(line_size - 1);
	end = (end + line_size - 1) & ~(line_size - 1);
	local_irq_save(flags);
	cpu_cache_wbinval_range(start, end, 1);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);

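/*
 * flush_icache_page() - write back and (for executable VMAs)
 * invalidate one page's cache lines.  The page is given a temporary
 * kernel mapping with kmap_atomic() so the cache ops have a virtual
 * address to operate on.
 */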
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long flags;
	unsigned long kaddr;
	local_irq_save(flags);
	kaddr = (unsigned long)kmap_atomic(page);
	cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
	kunmap_atomic((void *)kaddr);
	local_irq_restore(flags);
}

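/*
 * flush_icache_user_page() - flush only the 'len' bytes at the page
 * offset of 'addr' after the kernel has written into a user page,
 * e.g. when installing breakpoints via ptrace.
 */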
void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			    unsigned long addr, int len)
{
	unsigned long kaddr;
	kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
	flush_icache_range(kaddr, kaddr + len);
	kunmap_atomic((void *)kaddr);
}

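/*
 * update_mmu_cache() - called after a PTE is installed.  When the
 * affected mm is the active one, the new translation is pre-loaded
 * into the TLB.  If flush_dcache_page() earlier deferred a flush by
 * setting PG_dcache_dirty, or the mapping is executable, the page's
 * cache lines are written back and invalidated now through a
 * temporary kernel mapping.
 */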
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(*pte);
	unsigned long flags;

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}
	page = pfn_to_page(pfn);

	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
	    (vma->vm_flags & VM_EXEC)) {
		unsigned long kaddr;
		local_irq_save(flags);
		kaddr = (unsigned long)kmap_atomic(page);
		cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
		kunmap_atomic((void *)kaddr);
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_CPU_CACHE_ALIASING
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

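/*
 * aliasing() - non-zero when 'addr' and 'page' fall into different
 * cache colours, i.e. their offsets within an SHMLBA-sized window
 * differ, so the same physical data may occupy distinct lines in the
 * VIPT D-cache.
 */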
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
	return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

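/*
 * kremap0()/kremap1() - install a temporary, locked TLB entry mapping
 * physical address 'pa' at a fixed kernel virtual slot (BASE_ADDR0 or
 * BASE_ADDR1) adjusted to the same cache colour as 'uaddr'.  Cache
 * maintenance through the returned address then hits the same lines
 * as the user mapping does.  Two slots exist so that a source and a
 * destination can be mapped at once (see copy_user_highpage());
 * kunmap01() unlocks and invalidates either one.
 */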
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
	kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
	__nds32__tlbop_unlk(kaddr);
	__nds32__tlbop_inv(kaddr);
	__nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
	kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

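/*
 * flush_cache_mm() - there is no cheap way to pick out one mm's lines
 * from an aliasing cache, so write back and invalidate the entire
 * D-cache and invalidate the entire I-cache.
 */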
void flush_cache_mm(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
	local_irq_restore(flags);
}

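/*
 * flush_cache_dup_mm() - intentionally empty; no flushing is needed
 * when duplicating an mm on this port.
 */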
void flush_cache_dup_mm(struct mm_struct *mm)
{
}

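/*
 * flush_cache_range() - flush a user range page by page, skipping
 * addresses with no present translation.  Ranges over eight pages
 * fall back to a full cache flush, which is presumably cheaper than
 * the per-page walk at that size.
 */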
void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	unsigned long flags;

	if ((end - start) > 8 * PAGE_SIZE) {
		cpu_dcache_wbinval_all();
		if (vma->vm_flags & VM_EXEC)
			cpu_icache_inval_all();
		return;
	}
	local_irq_save(flags);
	while (start < end) {
		if (va_present(vma->vm_mm, start))
			cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
		start += PAGE_SIZE;
	}
	local_irq_restore(flags);
}

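/*
 * flush_cache_page() - flush one user page through a colour-matched
 * kremap0() alias so the lines of the user mapping itself are the
 * ones written back and invalidated.
 */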
void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long addr, unsigned long pfn)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(addr, pfn << PAGE_SHIFT);
	cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
	kunmap01(vto);
	local_irq_restore(flags);
}

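/*
 * flush_cache_vmap()/flush_cache_vunmap() - vmalloc areas may alias
 * arbitrary pages at arbitrary colours, so creating or tearing down
 * such mappings conservatively flushes both caches in full.
 */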
void flush_cache_vmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

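/*
 * copy_user_page() - copy a page that the user maps at 'vaddr': the
 * user-colour alias is flushed first so stale lines cannot mask the
 * copy, and the destination is flushed afterwards so the new data is
 * visible to the user mapping.
 */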
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	cpu_dcache_wbinval_page((unsigned long)vaddr);
	cpu_icache_inval_page((unsigned long)vaddr);
	copy_page(vto, vfrom);
	cpu_dcache_wbinval_page((unsigned long)vto);
	cpu_icache_inval_page((unsigned long)vto);
}

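/*
 * clear_user_page() - zero a page destined for user address 'vaddr',
 * with the same flushing around the operation as copy_user_page().
 */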
void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	cpu_dcache_wbinval_page((unsigned long)vaddr);
	cpu_icache_inval_page((unsigned long)vaddr);
	clear_page(addr);
	cpu_dcache_wbinval_page((unsigned long)addr);
	cpu_icache_inval_page((unsigned long)addr);
}

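/*
 * copy_user_highpage() - copy a (possibly highmem) page through two
 * colour-matched temporary mappings, kremap0() for the destination
 * and kremap1() for the source.  If the source's kernel alias sits in
 * a different colour than 'vaddr', it is written back first so the
 * copy reads current data.
 */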
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long vto, vfrom, flags, kfrom, pfrom, pto;

	kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
	pto = page_to_phys(to);
	pfrom = page_to_phys(from);

	local_irq_save(flags);
	if (aliasing(vaddr, (unsigned long)kfrom))
		cpu_dcache_wb_page((unsigned long)kfrom);
	vto = kremap0(vaddr, pto);
	vfrom = kremap1(vaddr, pfrom);
	copy_page((void *)vto, (void *)vfrom);
	kunmap01(vfrom);
	kunmap01(vto);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);

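/*
 * clear_user_highpage() - zero a (possibly highmem) page through a
 * user-colour kremap0() alias.  A lowmem kernel alias of a different
 * colour is invalidated first so no stale lines outlive the clear.
 */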
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long vto, flags, kto;

	kto = ((unsigned long)page_address(page) & PAGE_MASK);

	local_irq_save(flags);
	if (aliasing(kto, vaddr) && kto != 0) {
		cpu_dcache_inval_page(kto);
		cpu_icache_inval_page(kto);
	}
	vto = kremap0(vaddr, page_to_phys(page));
	clear_page((void *)vto);
	kunmap01(vto);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);

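/*
 * flush_dcache_page() - the kernel has dirtied a page cache page.  If
 * no user mapping exists yet, defer the flush by setting
 * PG_dcache_dirty for update_mmu_cache() to act on.  Otherwise flush
 * the kernel alias immediately, and also a colour-matched alias when
 * the file offset implies a user colour different from the kernel
 * one.
 */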
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping_file(page);
	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
	} else {
		unsigned long kaddr, flags;

		kaddr = (unsigned long)page_address(page);
		local_irq_save(flags);
		cpu_dcache_wbinval_page(kaddr);
		if (mapping) {
			unsigned long vaddr, kto;

			vaddr = page->index << PAGE_SHIFT;
			if (aliasing(vaddr, kaddr)) {
				kto = kremap0(vaddr, page_to_phys(page));
				cpu_dcache_wbinval_page(kto);
				kunmap01(kto);
			}
		}
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

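/*
 * copy_to_user_page() - write into a user page (e.g. for ptrace)
 * through a colour-matched kremap0() alias.  For executable VMAs the
 * touched lines are written back and the I-cache invalidated so the
 * new instructions take effect.
 */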
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long line_size, start, end, vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		line_size = L1_cache_info[DCACHE].line_size;
		start = (unsigned long)dst & ~(line_size - 1);
		end = ((unsigned long)dst + len + line_size - 1) &
		      ~(line_size - 1);
		cpu_cache_wbinval_range(start, end, 1);
	}
	kunmap01(vto);
	local_irq_restore(flags);
}

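/*
 * copy_from_user_page() - read from a user page through a
 * colour-matched kremap0() alias so the read sees the user mapping's
 * cache lines.
 */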
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	kunmap01(vto);
	local_irq_restore(flags);
}

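/*
 * flush_anon_page() - an anonymous page of the active mm is about to
 * be read through a different alias (e.g. by get_user_pages()).  For
 * executable VMAs the user I-cache page is invalidated; on a colour
 * mismatch the D-cache is written back and invalidated through a
 * colour-matched alias.
 */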
void flush_anon_page(struct vm_area_struct *vma,
		     struct page *page, unsigned long vaddr)
{
	unsigned long kaddr, flags, ktmp;

	if (!PageAnon(page))
		return;

	if (vma->vm_mm != current->active_mm)
		return;

	local_irq_save(flags);
	if (vma->vm_flags & VM_EXEC)
		cpu_icache_inval_page(vaddr & PAGE_MASK);
	kaddr = (unsigned long)page_address(page);
	if (aliasing(vaddr, kaddr)) {
		ktmp = kremap0(vaddr, page_to_phys(page));
		cpu_dcache_wbinval_page(ktmp);
		kunmap01(ktmp);
	}
	local_irq_restore(flags);
}

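/*
 * flush_kernel_dcache_page() - write back and invalidate the kernel
 * alias of a page the kernel has written, so other aliases observe
 * the update.
 */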
void flush_kernel_dcache_page(struct page *page)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_page((unsigned long)page_address(page));
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

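/*
 * flush_kernel_vmap_range() - write back a vmap'd range, e.g. before
 * the underlying pages are read through another alias or by a device.
 */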
void flush_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

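/*
 * invalidate_kernel_vmap_range() - drop cache lines over a vmap'd
 * range so the CPU re-reads data that was changed through another
 * alias (typically by a device writing the underlying pages).
 */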
void invalidate_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
#endif