xref: /freebsd/sys/arm/arm/cpuinfo.c (revision 32c48d07)
12b71562fSIan Lepore /*-
22b71562fSIan Lepore  * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
32b71562fSIan Lepore  * Copyright 2014 Michal Meloun <meloun@miracle.cz>
42b71562fSIan Lepore  * All rights reserved.
52b71562fSIan Lepore  *
62b71562fSIan Lepore  * Redistribution and use in source and binary forms, with or without
72b71562fSIan Lepore  * modification, are permitted provided that the following conditions
82b71562fSIan Lepore  * are met:
92b71562fSIan Lepore  * 1. Redistributions of source code must retain the above copyright
102b71562fSIan Lepore  *    notice, this list of conditions and the following disclaimer.
112b71562fSIan Lepore  * 2. Redistributions in binary form must reproduce the above copyright
122b71562fSIan Lepore  *    notice, this list of conditions and the following disclaimer in the
132b71562fSIan Lepore  *    documentation and/or other materials provided with the distribution.
142b71562fSIan Lepore  *
152b71562fSIan Lepore  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
162b71562fSIan Lepore  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
172b71562fSIan Lepore  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
182b71562fSIan Lepore  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
192b71562fSIan Lepore  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
202b71562fSIan Lepore  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
212b71562fSIan Lepore  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
222b71562fSIan Lepore  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
232b71562fSIan Lepore  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
242b71562fSIan Lepore  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
252b71562fSIan Lepore  * SUCH DAMAGE.
262b71562fSIan Lepore  */
272b71562fSIan Lepore 
282b71562fSIan Lepore #include <sys/cdefs.h>
292b71562fSIan Lepore __FBSDID("$FreeBSD$");
302b71562fSIan Lepore 
312b71562fSIan Lepore #include <sys/param.h>
322b71562fSIan Lepore #include <sys/systm.h>
33c40a5f8aSMichal Meloun #include <sys/kernel.h>
347bf5720aSMichal Meloun #include <sys/sysctl.h>
352b71562fSIan Lepore 
363025d19dSMichal Meloun #include <machine/cpu.h>
372b71562fSIan Lepore #include <machine/cpuinfo.h>
380cbf724eSMichal Meloun #include <machine/elf.h>
390cbf724eSMichal Meloun #include <machine/md_var.h>
402b71562fSIan Lepore 
#if __ARM_ARCH >= 6
/*
 * Switch to the final kernel translation tables and apply ACTLR changes.
 * NOTE(review): implemented elsewhere in the ARM machine-dependent code
 * (not in this file) — confirm exact location before relying on it.
 */
void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);
#endif
447bf5720aSMichal Meloun 
/*
 * Global CPU description, filled in by cpuinfo_init().  The cache-line
 * fields are pre-seeded so that cache maintenance performed before
 * cpuinfo_init() runs still uses sane values.
 */
struct cpuinfo cpuinfo =
{
	/* Use safe defaults for start */
	.dcache_line_size = 32,
	.dcache_line_mask = 31,
	.icache_line_size = 32,
	.icache_line_mask = 31,
};
532b71562fSIan Lepore 
/* sysctl trees: hw.cpu and hw.cpu.quirks. */
static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD, 0,
    "CPU");
static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD, 0,
    "CPU quirks");

/*
 * Tunable CPU quirks.
 * Be careful: ACTLR cannot be changed if the CPU was started in secure
 * mode (world), and a write to ACTLR can cause an exception!
 * These quirks are intended for optimizing CPU performance, not for
 * applying errata workarounds.  Nobody can expect that a CPU with unfixed
 * errata is stable enough to execute the kernel until quirks are applied.
 */
/* Bits to clear in ACTLR; fetched early in cpuinfo_init(), hence NOFETCH. */
static uint32_t cpu_quirks_actlr_mask;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
    "Bits to be masked in ACTLR");

/* Bits to set in ACTLR; fetched early in cpuinfo_init(), hence NOFETCH. */
static uint32_t cpu_quirks_actlr_set;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
    "Bits to be set in ACTLR");
777bf5720aSMichal Meloun 
/*
 * Read and parse the CPU identification registers (MIDR and friends) into
 * the global 'cpuinfo', derive L1 cache-line geometry from CTR, and — on
 * v6+ CPUs with the modern (0xF) id scheme — publish AT_HWCAP/AT_HWCAP2
 * ELF auxiliary vector bits.  Runs early on the boot CPU.
 */
void
cpuinfo_init(void)
{
#if __ARM_ARCH >= 6
	uint32_t tmp;
#endif

	/*
	 * Prematurely fetch CPU quirks. Standard fetch for tunable
	 * sysctls is handled using SYSINIT, thus too late for boot CPU.
	 * Keep names in sync with sysctls.
	 */
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask);
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set);

	cpuinfo.midr = cp15_midr_get();
	/* Test old version id schemes first */
	if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
		if (CPU_ID_ISOLD(cpuinfo.midr)) {
			/* obsolete ARMv2 or ARMv3 CPU; leave midr zeroed */
			cpuinfo.midr = 0;
			return;
		}
		if (CPU_ID_IS7(cpuinfo.midr)) {
			if ((cpuinfo.midr & (1 << 23)) == 0) {
				/* obsolete ARMv3 CPU */
				cpuinfo.midr = 0;
				return;
			}
			/* ARMv4T CPU */
			cpuinfo.architecture = 1;
			cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
		} else {
			/* ARM new id scheme */
			cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
			cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
		}
	} else {
		/* non ARM -> must be new id scheme */
		cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
		cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
	}
	/* Parse rest of MIDR: implementer, part number, patch (revision) */
	cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
	cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
	cpuinfo.patch = cpuinfo.midr & 0x0F;

	/* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
	cpuinfo.ctr = cp15_ctr_get();
	cpuinfo.tcmtr = cp15_tcmtr_get();
#if __ARM_ARCH >= 6
	cpuinfo.tlbtr = cp15_tlbtr_get();
	cpuinfo.mpidr = cp15_mpidr_get();
	cpuinfo.revidr = cp15_revidr_get();
#endif

	/* if CPU is not v7 cpu id scheme, the remaining id regs don't exist */
	if (cpuinfo.architecture != 0xF)
		return;
#if __ARM_ARCH >= 6
	/* Processor, debug, auxiliary, memory-model and ISA feature regs */
	cpuinfo.id_pfr0 = cp15_id_pfr0_get();
	cpuinfo.id_pfr1 = cp15_id_pfr1_get();
	cpuinfo.id_dfr0 = cp15_id_dfr0_get();
	cpuinfo.id_afr0 = cp15_id_afr0_get();
	cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
	cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
	cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
	cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
	cpuinfo.id_isar0 = cp15_id_isar0_get();
	cpuinfo.id_isar1 = cp15_id_isar1_get();
	cpuinfo.id_isar2 = cp15_id_isar2_get();
	cpuinfo.id_isar3 = cp15_id_isar3_get();
	cpuinfo.id_isar4 = cp15_id_isar4_get();
	cpuinfo.id_isar5 = cp15_id_isar5_get();

/* Not yet - CBAR only exists on ARM SMP Cortex-A CPUs
	cpuinfo.cbar = cp15_cbar_get();
*/
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.ccsidr = cp15_ccsidr_get();
		cpuinfo.clidr = cp15_clidr_get();
	}

	/* Test if revidr is implemented (reads as MIDR when it is not) */
	if (cpuinfo.revidr == cpuinfo.midr)
		cpuinfo.revidr = 0;

	/* parsed bits of above registers */
	/* id_mmfr0 */
	cpuinfo.outermost_shareability =  (cpuinfo.id_mmfr0 >> 8) & 0xF;
	cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
	cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
	cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
	/* id_mmfr2 */
	cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
	/* id_mmfr3 */
	cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	cpuinfo.maintenance_broadcast =(cpuinfo.id_mmfr3 >> 12) & 0xF;
	/* id_pfr1 */
	cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
	cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
	cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
	/* mpidr */
	cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1;

	/* L1 Cache sizes */
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		/* ARMv7 CTR format: [DI]minLine in words, log2-encoded */
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
	} else {
		/* Legacy CTR format: line length field, log2-encoded */
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
	}
	cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
	cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;

	/* Fill AT_HWCAP bits. */
	elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT; /* Required for all CPUs */
	elf_hwcap |= HWCAP_TLS | HWCAP_EDSP;	   /* Required for v6+ CPUs */

	tmp = (cpuinfo.id_isar0 >> 24) & 0xF;	/* Divide_instrs */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_IDIVT;
	if (tmp >= 2)
		elf_hwcap |= HWCAP_IDIVA;

	tmp = (cpuinfo.id_pfr0 >> 4) & 0xF; 	/* State1  */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMB;

	tmp = (cpuinfo.id_pfr0 >> 12) & 0xF; 	/* State3  */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMBEE;

	tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF; 	/* VMSA */
	if (tmp >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* Fill AT_HWCAP2 bits. */
	tmp = (cpuinfo.id_isar5 >> 4) & 0xF;	/* AES */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_AES;
	if (tmp >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;

	tmp = (cpuinfo.id_isar5 >> 8) & 0xF;	/* SHA1 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	tmp = (cpuinfo.id_isar5 >> 12) & 0xF;	/* SHA2 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	tmp = (cpuinfo.id_isar5 >> 16) & 0xF;	/* CRC32 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
#endif
}
241935c21a1SIan Lepore 
2427bf5720aSMichal Meloun #if __ARM_ARCH >= 6
243935c21a1SIan Lepore /*
244935c21a1SIan Lepore  * Get bits that must be set or cleared in ACLR register.
245935c21a1SIan Lepore  * Note: Bits in ACLR register are IMPLEMENTATION DEFINED.
246935c21a1SIan Lepore  * Its expected that SCU is in operational state before this
247935c21a1SIan Lepore  * function is called.
248935c21a1SIan Lepore  */
2497bf5720aSMichal Meloun static void
250935c21a1SIan Lepore cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
251935c21a1SIan Lepore {
2527bf5720aSMichal Meloun 
253935c21a1SIan Lepore 	*actlr_mask = 0;
254935c21a1SIan Lepore 	*actlr_set = 0;
255935c21a1SIan Lepore 
256935c21a1SIan Lepore 	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
257935c21a1SIan Lepore 		switch (cpuinfo.part_number) {
258ba0bb206SMichal Meloun 		case CPU_ARCH_CORTEX_A73:
25955e447c9SMichal Meloun 		case CPU_ARCH_CORTEX_A72:
26055e447c9SMichal Meloun 		case CPU_ARCH_CORTEX_A57:
26155e447c9SMichal Meloun 		case CPU_ARCH_CORTEX_A53:
26255e447c9SMichal Meloun 			/* Nothing to do for AArch32 */
26355e447c9SMichal Meloun 			break;
264935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A17:
265935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */
266935c21a1SIan Lepore 			/*
267935c21a1SIan Lepore 			 * Enable SMP mode
268935c21a1SIan Lepore 			 */
269935c21a1SIan Lepore 			*actlr_mask = (1 << 6);
270935c21a1SIan Lepore 			*actlr_set = (1 << 6);
271935c21a1SIan Lepore 			break;
272935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A15:
273935c21a1SIan Lepore 			/*
274935c21a1SIan Lepore 			 * Enable snoop-delayed exclusive handling
275935c21a1SIan Lepore 			 * Enable SMP mode
276935c21a1SIan Lepore 			 */
277935c21a1SIan Lepore 			*actlr_mask = (1U << 31) |(1 << 6);
278935c21a1SIan Lepore 			*actlr_set = (1U << 31) |(1 << 6);
279935c21a1SIan Lepore 			break;
280935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A9:
281935c21a1SIan Lepore 			/*
282935c21a1SIan Lepore 			 * Disable exclusive L1/L2 cache control
283935c21a1SIan Lepore 			 * Enable SMP mode
284935c21a1SIan Lepore 			 * Enable Cache and TLB maintenance broadcast
285935c21a1SIan Lepore 			 */
286935c21a1SIan Lepore 			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
287935c21a1SIan Lepore 			*actlr_set = (1 << 6) | (1 << 0);
288935c21a1SIan Lepore 			break;
289935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A8:
290935c21a1SIan Lepore 			/*
291935c21a1SIan Lepore 			 * Enable L2 cache
292935c21a1SIan Lepore 			 * Enable L1 data cache hardware alias checks
293935c21a1SIan Lepore 			 */
294935c21a1SIan Lepore 			*actlr_mask = (1 << 1) | (1 << 0);
295935c21a1SIan Lepore 			*actlr_set = (1 << 1);
296935c21a1SIan Lepore 			break;
297935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A7:
298935c21a1SIan Lepore 			/*
299935c21a1SIan Lepore 			 * Enable SMP mode
300935c21a1SIan Lepore 			 */
301935c21a1SIan Lepore 			*actlr_mask = (1 << 6);
302935c21a1SIan Lepore 			*actlr_set = (1 << 6);
303935c21a1SIan Lepore 			break;
304935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A5:
305935c21a1SIan Lepore 			/*
306935c21a1SIan Lepore 			 * Disable exclusive L1/L2 cache control
307935c21a1SIan Lepore 			 * Enable SMP mode
308935c21a1SIan Lepore 			 * Enable Cache and TLB maintenance broadcast
309935c21a1SIan Lepore 			 */
310935c21a1SIan Lepore 			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
311935c21a1SIan Lepore 			*actlr_set = (1 << 6) | (1 << 0);
312935c21a1SIan Lepore 			break;
313935c21a1SIan Lepore 		case CPU_ARCH_ARM1176:
314935c21a1SIan Lepore 			/*
315935c21a1SIan Lepore 			 * Restrict cache size to 16KB
316935c21a1SIan Lepore 			 * Enable the return stack
317935c21a1SIan Lepore 			 * Enable dynamic branch prediction
318935c21a1SIan Lepore 			 * Enable static branch prediction
319935c21a1SIan Lepore 			 */
320935c21a1SIan Lepore 			*actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
321935c21a1SIan Lepore 			*actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
322935c21a1SIan Lepore 			break;
323935c21a1SIan Lepore 		}
324935c21a1SIan Lepore 		return;
325935c21a1SIan Lepore 	}
326935c21a1SIan Lepore }
3277bf5720aSMichal Meloun 
3287bf5720aSMichal Meloun /* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */
3297bf5720aSMichal Meloun void
3307bf5720aSMichal Meloun cpuinfo_reinit_mmu(uint32_t ttb)
3317bf5720aSMichal Meloun {
3327bf5720aSMichal Meloun 	uint32_t actlr_mask;
3337bf5720aSMichal Meloun 	uint32_t actlr_set;
3347bf5720aSMichal Meloun 
3357bf5720aSMichal Meloun 	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
3367bf5720aSMichal Meloun 	actlr_mask |= cpu_quirks_actlr_mask;
3377bf5720aSMichal Meloun 	actlr_set |= cpu_quirks_actlr_set;
3387bf5720aSMichal Meloun 	reinit_mmu(ttb, actlr_mask, actlr_set);
3397bf5720aSMichal Meloun }
3407bf5720aSMichal Meloun 
3417bf5720aSMichal Meloun #endif /* __ARM_ARCH >= 6 */
342