xref: /linux/arch/powerpc/mm/book3s32/tlb.c (revision 2da68a77)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include <mm/mmu_decl.h>

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * Since the hardware hash table functions as an extension of the
 * TLB as far as the Linux tables are concerned, flush it too.
 *    -- Cort
 */

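/*
 * A minimal usage sketch of the entry points above, assuming a
 * hypothetical caller that has just cleared some user PTEs ("vma",
 * "start" and "end" here are illustrative, not names from this file):
 *
 *	// PTEs for [start, end) have been cleared; drop the stale TLB
 *	// entries and, on hash MMUs, the HPTEs that back them.
 *	flush_tlb_range(vma, start, end);
 *
 * On a hash MMU that call is expected to end up in hash__flush_range()
 * below, which destroys the matching hash-table entries as well.
 */
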
/*
 * For each address in the range, find the pte for the address
 * and check the _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;
	int count;
	unsigned int ctx = mm->context.id;

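	/*
	 * Normalise to an inclusive, page-aligned range: start is rounded
	 * down to a page boundary, and end becomes the last byte of the
	 * page containing the (exclusive) original end.
	 */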
	start &= PAGE_MASK;
	if (start >= end)
		return;
	end = (end - 1) | ~PAGE_MASK;
	pmd = pmd_off(mm, start);
	for (;;) {
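		/*
		 * pmd_end is the last address covered by this PMD entry,
		 * clamped to the end of the range; each chunk is handed
		 * to flush_hash_pages() as an inclusive page count.
		 */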
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}
EXPORT_SYMBOL(hash__flush_range);

/*
 * Flush all the (user) entries for the address space described by mm.
 */
void hash__flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *mp;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * It is safe to iterate the vmas when called from dup_mmap,
	 * holding mmap_lock.  It would also be safe from unmap_region
	 * or exit_mmap, but not from vmtruncate on SMP; dup_mmap seems
	 * to be the only SMP case which gets here.
	 */
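	/*
	 * Walk every VMA and flush just the pages it maps, so the HPTEs
	 * backing each mapped range are torn down range by range rather
	 * than by a global flush.
	 */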
	for_each_vma(vmi, mp)
		hash__flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
}
EXPORT_SYMBOL(hash__flush_tlb_mm);

void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;

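	/*
	 * Kernel addresses (at or above TASK_SIZE) are translated through
	 * init_mm's page tables; user addresses through the VMA's mm.
	 */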
	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_off(mm, vmaddr);
	if (!pmd_none(*pmd))
		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
EXPORT_SYMBOL(hash__flush_tlb_page);