xref: /freebsd/sys/arm/arm/cpuinfo.c (revision c40a5f8a)
12b71562fSIan Lepore /*-
22b71562fSIan Lepore  * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
32b71562fSIan Lepore  * Copyright 2014 Michal Meloun <meloun@miracle.cz>
42b71562fSIan Lepore  * All rights reserved.
52b71562fSIan Lepore  *
62b71562fSIan Lepore  * Redistribution and use in source and binary forms, with or without
72b71562fSIan Lepore  * modification, are permitted provided that the following conditions
82b71562fSIan Lepore  * are met:
92b71562fSIan Lepore  * 1. Redistributions of source code must retain the above copyright
102b71562fSIan Lepore  *    notice, this list of conditions and the following disclaimer.
112b71562fSIan Lepore  * 2. Redistributions in binary form must reproduce the above copyright
122b71562fSIan Lepore  *    notice, this list of conditions and the following disclaimer in the
132b71562fSIan Lepore  *    documentation and/or other materials provided with the distribution.
142b71562fSIan Lepore  *
152b71562fSIan Lepore  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
162b71562fSIan Lepore  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
172b71562fSIan Lepore  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
182b71562fSIan Lepore  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
192b71562fSIan Lepore  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
202b71562fSIan Lepore  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
212b71562fSIan Lepore  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
222b71562fSIan Lepore  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
232b71562fSIan Lepore  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
242b71562fSIan Lepore  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
252b71562fSIan Lepore  * SUCH DAMAGE.
262b71562fSIan Lepore  */
272b71562fSIan Lepore 
282b71562fSIan Lepore #include <sys/cdefs.h>
292b71562fSIan Lepore __FBSDID("$FreeBSD$");
302b71562fSIan Lepore 
312b71562fSIan Lepore #include <sys/param.h>
322b71562fSIan Lepore #include <sys/systm.h>
33c40a5f8aSMichal Meloun #include <sys/kernel.h>
347bf5720aSMichal Meloun #include <sys/sysctl.h>
352b71562fSIan Lepore 
363025d19dSMichal Meloun #include <machine/cpu.h>
372b71562fSIan Lepore #include <machine/cpuinfo.h>
382b71562fSIan Lepore 
#if __ARM_ARCH >= 6
/*
 * Rebuild the MMU state with the final kernel translation table base and
 * apply ACTLR clear/set quirk bits.  Defined elsewhere (presumably the
 * early-boot/locore assembly for ARMv6+) — TODO confirm location.
 */
void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);
#endif
427bf5720aSMichal Meloun 
/*
 * Global CPU characteristics, populated by cpuinfo_init().  Initialized
 * with conservative cache-line defaults so cache maintenance code works
 * even before cpuinfo_init() runs (or on pre-ARMv7 id schemes).
 */
struct cpuinfo cpuinfo =
{
	/* Use safe defaults for start */
	.dcache_line_size = 32,	/* 32-byte lines; mask = size - 1 */
	.dcache_line_mask = 31,
	.icache_line_size = 32,
	.icache_line_mask = 31,
};
512b71562fSIan Lepore 
/* sysctl tree: hw.cpu and hw.cpu.quirks (read-only nodes). */
static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD, 0,
    "CPU");
static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD, 0,
    "CPU quirks");

/*
 * Tunable CPU quirks.
 * Be careful, ACTRL cannot be changed if CPU is started in secure
 * mode(world) and write to ACTRL can cause exception!
 * These quirks are intended for optimizing CPU performance, not for
 * applying errata workarounds. Nobody can expect that CPU with unfixed
 * errata is stable enough to execute the kernel until quirks are applied.
 *
 * CTLFLAG_NOFETCH: cpuinfo_init() fetches these tunables by hand (before
 * SYSINIT runs), so the standard automatic fetch is suppressed.
 */
/* NOTE(review): variables are uint32_t but exported via SYSCTL_INT — the
 * values are bitmasks, so signed display is cosmetic; confirm intentional. */
static uint32_t cpu_quirks_actlr_mask;	/* bits to clear in ACTLR */
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
    "Bits to be masked in ACTLR");

static uint32_t cpu_quirks_actlr_set;	/* bits to set in ACTLR */
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
    "Bits to be set in ACTLR");
747bf5720aSMichal Meloun 
757bf5720aSMichal Meloun 
/*
 * Read and parse CPU id scheme.
 *
 * Fills the global cpuinfo structure from MIDR and the other cp15
 * identification registers.  Runs very early in boot (before SYSINIT),
 * hence the manual tunable fetch below.  On obsolete CPUs (ARMv2/v3)
 * cpuinfo.midr is zeroed and parsing stops.
 */
void
cpuinfo_init(void)
{

	/*
	 * Prematurely fetch CPU quirks. Standard fetch for tunable
	 * sysctls is handled using SYSINIT, thus too late for boot CPU.
	 * Keep names in sync with sysctls.
	 */
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask);
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set);

	cpuinfo.midr = cp15_midr_get();
	/* Test old version id schemes first */
	if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
		if (CPU_ID_ISOLD(cpuinfo.midr)) {
			/* obsolete ARMv2 or ARMv3 CPU */
			cpuinfo.midr = 0;	/* midr == 0 marks "unsupported" */
			return;
		}
		if (CPU_ID_IS7(cpuinfo.midr)) {
			/* bit 23 distinguishes ARMv4T from obsolete ARMv3 */
			if ((cpuinfo.midr & (1 << 23)) == 0) {
				/* obsolete ARMv3 CPU */
				cpuinfo.midr = 0;
				return;
			}
			/* ARMv4T CPU */
			cpuinfo.architecture = 1;
			cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
		} else {
			/* ARM new id scheme */
			cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
			cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
		}
	} else {
		/* non ARM -> must be new id scheme */
		cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
		cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
	}
	/* Parse rest of MIDR  */
	cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
	cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
	cpuinfo.patch = cpuinfo.midr & 0x0F;

	/* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
	cpuinfo.ctr = cp15_ctr_get();
	cpuinfo.tcmtr = cp15_tcmtr_get();
#if __ARM_ARCH >= 6
	/* These identification registers exist only on ARMv6 and later. */
	cpuinfo.tlbtr = cp15_tlbtr_get();
	cpuinfo.mpidr = cp15_mpidr_get();
	cpuinfo.revidr = cp15_revidr_get();
#endif

	/* if CPU is not v7 cpu id scheme */
	/* architecture == 0xF means "read the CPUID feature registers" */
	if (cpuinfo.architecture != 0xF)
		return;
#if __ARM_ARCH >= 6
	/* Processor, debug, auxiliary, memory-model and ISA feature regs. */
	cpuinfo.id_pfr0 = cp15_id_pfr0_get();
	cpuinfo.id_pfr1 = cp15_id_pfr1_get();
	cpuinfo.id_dfr0 = cp15_id_dfr0_get();
	cpuinfo.id_afr0 = cp15_id_afr0_get();
	cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
	cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
	cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
	cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
	cpuinfo.id_isar0 = cp15_id_isar0_get();
	cpuinfo.id_isar1 = cp15_id_isar1_get();
	cpuinfo.id_isar2 = cp15_id_isar2_get();
	cpuinfo.id_isar3 = cp15_id_isar3_get();
	cpuinfo.id_isar4 = cp15_id_isar4_get();
	cpuinfo.id_isar5 = cp15_id_isar5_get();

/* Not yet - CBAR only exist on ARM SMP Cortex A CPUs
	cpuinfo.cbar = cp15_cbar_get();
*/
	/* Cache-size/level id registers exist only with the ARMv7 CTR format. */
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.ccsidr = cp15_ccsidr_get();
		cpuinfo.clidr = cp15_clidr_get();
	}

	/* Test if revidr is implemented */
	/* An unimplemented REVIDR reads as MIDR; treat that as "none". */
	if (cpuinfo.revidr == cpuinfo.midr)
		cpuinfo.revidr = 0;

	/* parsed bits of above registers */
	/* id_mmfr0 */
	cpuinfo.outermost_shareability =  (cpuinfo.id_mmfr0 >> 8) & 0xF;
	cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
	cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
	cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
	/* id_mmfr2 */
	cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
	/* id_mmfr3 */
	cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	cpuinfo.maintenance_broadcast =(cpuinfo.id_mmfr3 >> 12) & 0xF;
	/* id_pfr1 */
	cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
	cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
	cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
	/* mpidr */
	cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1;

	/* L1 Cache sizes */
	/* ARMv7 CTR encodes line size as log2(words)+2; older format +3. */
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
	} else {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
	}
	cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
	cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;
#endif
}
195935c21a1SIan Lepore 
1967bf5720aSMichal Meloun #if __ARM_ARCH >= 6
197935c21a1SIan Lepore /*
198935c21a1SIan Lepore  * Get bits that must be set or cleared in ACLR register.
199935c21a1SIan Lepore  * Note: Bits in ACLR register are IMPLEMENTATION DEFINED.
200935c21a1SIan Lepore  * Its expected that SCU is in operational state before this
201935c21a1SIan Lepore  * function is called.
202935c21a1SIan Lepore  */
2037bf5720aSMichal Meloun static void
204935c21a1SIan Lepore cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
205935c21a1SIan Lepore {
2067bf5720aSMichal Meloun 
207935c21a1SIan Lepore 	*actlr_mask = 0;
208935c21a1SIan Lepore 	*actlr_set = 0;
209935c21a1SIan Lepore 
210935c21a1SIan Lepore 	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
211935c21a1SIan Lepore 		switch (cpuinfo.part_number) {
212ba0bb206SMichal Meloun 		case CPU_ARCH_CORTEX_A73:
21355e447c9SMichal Meloun 		case CPU_ARCH_CORTEX_A72:
21455e447c9SMichal Meloun 		case CPU_ARCH_CORTEX_A57:
21555e447c9SMichal Meloun 		case CPU_ARCH_CORTEX_A53:
21655e447c9SMichal Meloun 			/* Nothing to do for AArch32 */
21755e447c9SMichal Meloun 			break;
218935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A17:
219935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */
220935c21a1SIan Lepore 			/*
221935c21a1SIan Lepore 			 * Enable SMP mode
222935c21a1SIan Lepore 			 */
223935c21a1SIan Lepore 			*actlr_mask = (1 << 6);
224935c21a1SIan Lepore 			*actlr_set = (1 << 6);
225935c21a1SIan Lepore 			break;
226935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A15:
227935c21a1SIan Lepore 			/*
228935c21a1SIan Lepore 			 * Enable snoop-delayed exclusive handling
229935c21a1SIan Lepore 			 * Enable SMP mode
230935c21a1SIan Lepore 			 */
231935c21a1SIan Lepore 			*actlr_mask = (1U << 31) |(1 << 6);
232935c21a1SIan Lepore 			*actlr_set = (1U << 31) |(1 << 6);
233935c21a1SIan Lepore 			break;
234935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A9:
235935c21a1SIan Lepore 			/*
236935c21a1SIan Lepore 			 * Disable exclusive L1/L2 cache control
237935c21a1SIan Lepore 			 * Enable SMP mode
238935c21a1SIan Lepore 			 * Enable Cache and TLB maintenance broadcast
239935c21a1SIan Lepore 			 */
240935c21a1SIan Lepore 			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
241935c21a1SIan Lepore 			*actlr_set = (1 << 6) | (1 << 0);
242935c21a1SIan Lepore 			break;
243935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A8:
244935c21a1SIan Lepore 			/*
245935c21a1SIan Lepore 			 * Enable L2 cache
246935c21a1SIan Lepore 			 * Enable L1 data cache hardware alias checks
247935c21a1SIan Lepore 			 */
248935c21a1SIan Lepore 			*actlr_mask = (1 << 1) | (1 << 0);
249935c21a1SIan Lepore 			*actlr_set = (1 << 1);
250935c21a1SIan Lepore 			break;
251935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A7:
252935c21a1SIan Lepore 			/*
253935c21a1SIan Lepore 			 * Enable SMP mode
254935c21a1SIan Lepore 			 */
255935c21a1SIan Lepore 			*actlr_mask = (1 << 6);
256935c21a1SIan Lepore 			*actlr_set = (1 << 6);
257935c21a1SIan Lepore 			break;
258935c21a1SIan Lepore 		case CPU_ARCH_CORTEX_A5:
259935c21a1SIan Lepore 			/*
260935c21a1SIan Lepore 			 * Disable exclusive L1/L2 cache control
261935c21a1SIan Lepore 			 * Enable SMP mode
262935c21a1SIan Lepore 			 * Enable Cache and TLB maintenance broadcast
263935c21a1SIan Lepore 			 */
264935c21a1SIan Lepore 			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
265935c21a1SIan Lepore 			*actlr_set = (1 << 6) | (1 << 0);
266935c21a1SIan Lepore 			break;
267935c21a1SIan Lepore 		case CPU_ARCH_ARM1176:
268935c21a1SIan Lepore 			/*
269935c21a1SIan Lepore 			 * Restrict cache size to 16KB
270935c21a1SIan Lepore 			 * Enable the return stack
271935c21a1SIan Lepore 			 * Enable dynamic branch prediction
272935c21a1SIan Lepore 			 * Enable static branch prediction
273935c21a1SIan Lepore 			 */
274935c21a1SIan Lepore 			*actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
275935c21a1SIan Lepore 			*actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
276935c21a1SIan Lepore 			break;
277935c21a1SIan Lepore 		}
278935c21a1SIan Lepore 		return;
279935c21a1SIan Lepore 	}
280935c21a1SIan Lepore }
2817bf5720aSMichal Meloun 
2827bf5720aSMichal Meloun /* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */
2837bf5720aSMichal Meloun void
2847bf5720aSMichal Meloun cpuinfo_reinit_mmu(uint32_t ttb)
2857bf5720aSMichal Meloun {
2867bf5720aSMichal Meloun 	uint32_t actlr_mask;
2877bf5720aSMichal Meloun 	uint32_t actlr_set;
2887bf5720aSMichal Meloun 
2897bf5720aSMichal Meloun 	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
2907bf5720aSMichal Meloun 	actlr_mask |= cpu_quirks_actlr_mask;
2917bf5720aSMichal Meloun 	actlr_set |= cpu_quirks_actlr_set;
2927bf5720aSMichal Meloun 	reinit_mmu(ttb, actlr_mask, actlr_set);
2937bf5720aSMichal Meloun }
2947bf5720aSMichal Meloun 
2957bf5720aSMichal Meloun #endif /* __ARM_ARCH >= 6 */
296