xref: /linux/arch/riscv/mm/cacheflush.c (revision 9a6b55ac)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

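/*
 * IPI callback: flush the instruction cache on the local hart.  Used as
 * the on_each_cpu()/on_each_cpu_mask() handler when SBI is unavailable.
 */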
static void ipi_remote_fence_i(void *info)
{
	local_flush_icache_all();
}

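/*
 * Flush the instruction cache on every hart.  Prefer a single SBI call,
 * which lets the firmware fence all harts at once; otherwise fall back
 * to sending an IPI to each online CPU.
 */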
void flush_icache_all(void)
{
	if (IS_ENABLED(CONFIG_RISCV_SBI))
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
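	/*
	 * If no other hart is currently running this MM, the local flush we
	 * just did is sufficient: any hart that picks the MM up later still
	 * has its bit set in icache_stale_mask and will flush its icache
	 * before resuming execution.
	 */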
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		cpumask_t hartid_mask;

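		/*
		 * SBI addresses harts by hartid, which need not match the
		 * kernel's logical CPU numbering, so translate the cpumask
		 * before handing it to the firmware.
		 */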
		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
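/*
 * Ensure the icache is coherent with the contents of a page that is
 * being mapped (called for executable PTEs).  PG_dcache_clean records
 * that the flush has already been done, so repeated mappings of the
 * same page skip the global flush.
 */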
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		flush_icache_all();
}
#endif /* CONFIG_MMU */