/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2EF:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON64:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

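/*
 * Flush the micro TLBs for a VMA.  Only executable mappings can leave
 * stale entries in the ITLB, so non-executable VMAs are left alone.
 */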
static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}

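/*
 * Invalidate the entire TLB.  When the core has the tlbinv facility
 * and no wired entries are in use, one tlbinvf covers the whole VTLB
 * and one more per FTLB set covers the rest; otherwise every entry is
 * overwritten with a unique, unmatchable VPN2 value.
 */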
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating over
	 * the individual entries instead.
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

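/*
 * Flush a range of user addresses.  Small ranges are probed and
 * invalidated an (even, odd) page pair at a time; once the range
 * covers more than a fraction of the TLB it is cheaper to drop the
 * whole MMU context instead.
 */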
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, old_mmid;
			int newpid = cpu_asid(cpu, mm);

			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}

			htw_stop();
			while (start < end) {
				int idx;

				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}

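/*
 * Flush a range of kernel addresses.  Same strategy as
 * local_flush_tlb_range(), but without an mm to hang an ASID off:
 * large ranges fall back to local_flush_tlb_all().
 */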
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}

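/*
 * Flush a single user page: probe for the (VPN2, ASID/MMID) pair and,
 * if present, replace it with a unique unmatchable entry.
 */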
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long old_mmid;
		unsigned long flags, old_entryhi;
		int idx;

		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in pages for the debuggee: if the
	 * fault is for an mm other than the current one, there is
	 * nothing to update in this CPU's TLB.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* This could be a huge page. */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);
	local_irq_restore(flags);
}

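/*
 * Install a permanently wired TLB entry: the entry is written at the
 * current wired index and the wired count is raised past it, so that
 * subsequent flushes leave the entry in place.
 */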
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int old_mmid;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

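/*
 * Probe for huge page support by writing PM_HUGE_MASK to c0_pagemask
 * and reading it back; hardware without huge page support won't hold
 * the value.  The result is cached in a static after the first call.
 */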
int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;

	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		local_irq_save(flags);
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}
	return mask == PM_HUGE_MASK;
}
EXPORT_SYMBOL(has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

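/*
 * "ntlb=" kernel command line option: restrict the number of usable
 * TLB entries by wiring off the remainder in tlb_init() below.
 */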
static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

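/*
 * Boot-time TLB setup: configure the MMU, apply any "ntlb="
 * restriction and install the TLB refill handler.
 */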
void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

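/*
 * CPU power management callback: the TLB configuration presumably does
 * not survive the low-power state, so redo it whenever a core leaves
 * that state (or failed to enter it).
 */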
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);