/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
262b71562fSIan Lepore */ 272b71562fSIan Lepore 282b71562fSIan Lepore #include <sys/cdefs.h> 292b71562fSIan Lepore __FBSDID("$FreeBSD$"); 302b71562fSIan Lepore 312b71562fSIan Lepore #include <sys/param.h> 322b71562fSIan Lepore #include <sys/systm.h> 332b71562fSIan Lepore 343025d19dSMichal Meloun #include <machine/cpu.h> 352b71562fSIan Lepore #include <machine/cpuinfo.h> 362b71562fSIan Lepore 37a286c311SIan Lepore struct cpuinfo cpuinfo = 38a286c311SIan Lepore { 39a286c311SIan Lepore /* Use safe defaults for start */ 40a286c311SIan Lepore .dcache_line_size = 32, 41a286c311SIan Lepore .dcache_line_mask = 31, 42a286c311SIan Lepore .icache_line_size = 32, 43a286c311SIan Lepore .icache_line_mask = 31, 44a286c311SIan Lepore }; 452b71562fSIan Lepore 462b71562fSIan Lepore /* Read and parse CPU id scheme */ 472b71562fSIan Lepore void 482b71562fSIan Lepore cpuinfo_init(void) 492b71562fSIan Lepore { 502b71562fSIan Lepore 512b71562fSIan Lepore cpuinfo.midr = cp15_midr_get(); 522b71562fSIan Lepore /* Test old version id schemes first */ 532b71562fSIan Lepore if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) { 542b71562fSIan Lepore if (CPU_ID_ISOLD(cpuinfo.midr)) { 552b71562fSIan Lepore /* obsolete ARMv2 or ARMv3 CPU */ 562b71562fSIan Lepore cpuinfo.midr = 0; 572b71562fSIan Lepore return; 582b71562fSIan Lepore } 592b71562fSIan Lepore if (CPU_ID_IS7(cpuinfo.midr)) { 602b71562fSIan Lepore if ((cpuinfo.midr & (1 << 23)) == 0) { 612b71562fSIan Lepore /* obsolete ARMv3 CPU */ 622b71562fSIan Lepore cpuinfo.midr = 0; 632b71562fSIan Lepore return; 642b71562fSIan Lepore } 652b71562fSIan Lepore /* ARMv4T CPU */ 662b71562fSIan Lepore cpuinfo.architecture = 1; 672b71562fSIan Lepore cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F; 688e6dd301SIan Lepore } else { 698e6dd301SIan Lepore /* ARM new id scheme */ 708e6dd301SIan Lepore cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F; 718e6dd301SIan Lepore cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F; 722b71562fSIan Lepore } 
732b71562fSIan Lepore } else { 748e6dd301SIan Lepore /* non ARM -> must be new id scheme */ 752b71562fSIan Lepore cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F; 762b71562fSIan Lepore cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F; 772b71562fSIan Lepore } 782b71562fSIan Lepore /* Parse rest of MIDR */ 792b71562fSIan Lepore cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF; 802b71562fSIan Lepore cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF; 812b71562fSIan Lepore cpuinfo.patch = cpuinfo.midr & 0x0F; 822b71562fSIan Lepore 832b71562fSIan Lepore /* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */ 842b71562fSIan Lepore cpuinfo.ctr = cp15_ctr_get(); 852b71562fSIan Lepore cpuinfo.tcmtr = cp15_tcmtr_get(); 863025d19dSMichal Meloun #if __ARM_ARCH >= 6 872b71562fSIan Lepore cpuinfo.tlbtr = cp15_tlbtr_get(); 882b71562fSIan Lepore cpuinfo.mpidr = cp15_mpidr_get(); 892b71562fSIan Lepore cpuinfo.revidr = cp15_revidr_get(); 903025d19dSMichal Meloun #endif 912b71562fSIan Lepore 922b71562fSIan Lepore /* if CPU is not v7 cpu id scheme */ 932b71562fSIan Lepore if (cpuinfo.architecture != 0xF) 942b71562fSIan Lepore return; 953025d19dSMichal Meloun #if __ARM_ARCH >= 6 962b71562fSIan Lepore cpuinfo.id_pfr0 = cp15_id_pfr0_get(); 972b71562fSIan Lepore cpuinfo.id_pfr1 = cp15_id_pfr1_get(); 982b71562fSIan Lepore cpuinfo.id_dfr0 = cp15_id_dfr0_get(); 992b71562fSIan Lepore cpuinfo.id_afr0 = cp15_id_afr0_get(); 1002b71562fSIan Lepore cpuinfo.id_mmfr0 = cp15_id_mmfr0_get(); 1012b71562fSIan Lepore cpuinfo.id_mmfr1 = cp15_id_mmfr1_get(); 1022b71562fSIan Lepore cpuinfo.id_mmfr2 = cp15_id_mmfr2_get(); 1032b71562fSIan Lepore cpuinfo.id_mmfr3 = cp15_id_mmfr3_get(); 1042b71562fSIan Lepore cpuinfo.id_isar0 = cp15_id_isar0_get(); 1052b71562fSIan Lepore cpuinfo.id_isar1 = cp15_id_isar1_get(); 1062b71562fSIan Lepore cpuinfo.id_isar2 = cp15_id_isar2_get(); 1072b71562fSIan Lepore cpuinfo.id_isar3 = cp15_id_isar3_get(); 1082b71562fSIan Lepore cpuinfo.id_isar4 = 
cp15_id_isar4_get(); 1092b71562fSIan Lepore cpuinfo.id_isar5 = cp15_id_isar5_get(); 1102b71562fSIan Lepore 1112b71562fSIan Lepore /* Not yet - CBAR only exist on ARM SMP Cortex A CPUs 1122b71562fSIan Lepore cpuinfo.cbar = cp15_cbar_get(); 1132b71562fSIan Lepore */ 1142b71562fSIan Lepore 1152b71562fSIan Lepore /* Test if revidr is implemented */ 1162b71562fSIan Lepore if (cpuinfo.revidr == cpuinfo.midr) 1172b71562fSIan Lepore cpuinfo.revidr = 0; 1182b71562fSIan Lepore 1192b71562fSIan Lepore /* parsed bits of above registers */ 1202b71562fSIan Lepore /* id_mmfr0 */ 1212b71562fSIan Lepore cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF; 1222b71562fSIan Lepore cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF; 1232b71562fSIan Lepore cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF; 1242b71562fSIan Lepore cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF; 1252b71562fSIan Lepore /* id_mmfr2 */ 1262b71562fSIan Lepore cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF; 1272b71562fSIan Lepore /* id_mmfr3 */ 1282b71562fSIan Lepore cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF; 1292b71562fSIan Lepore cpuinfo.maintenance_broadcast =(cpuinfo.id_mmfr3 >> 12) & 0xF; 1302b71562fSIan Lepore /* id_pfr1 */ 1312b71562fSIan Lepore cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF; 1322b71562fSIan Lepore cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF; 1332b71562fSIan Lepore cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF; 134a286c311SIan Lepore 135a286c311SIan Lepore /* L1 Cache sizes */ 136a22f8196SIan Lepore if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) { 137a22f8196SIan Lepore cpuinfo.dcache_line_size = 138a22f8196SIan Lepore 1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2); 139a22f8196SIan Lepore cpuinfo.icache_line_size = 140a22f8196SIan Lepore 1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2); 141a22f8196SIan Lepore } else { 142a22f8196SIan Lepore cpuinfo.dcache_line_size = 143a22f8196SIan Lepore 1 << 
(CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3); 144a22f8196SIan Lepore cpuinfo.icache_line_size = 145a22f8196SIan Lepore 1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3); 146a22f8196SIan Lepore } 147a286c311SIan Lepore cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1; 148a286c311SIan Lepore cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1; 1493025d19dSMichal Meloun #endif 1502b71562fSIan Lepore } 151935c21a1SIan Lepore 152935c21a1SIan Lepore /* 153935c21a1SIan Lepore * Get bits that must be set or cleared in ACLR register. 154935c21a1SIan Lepore * Note: Bits in ACLR register are IMPLEMENTATION DEFINED. 155935c21a1SIan Lepore * Its expected that SCU is in operational state before this 156935c21a1SIan Lepore * function is called. 157935c21a1SIan Lepore */ 158935c21a1SIan Lepore void 159935c21a1SIan Lepore cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set) 160935c21a1SIan Lepore { 161935c21a1SIan Lepore *actlr_mask = 0; 162935c21a1SIan Lepore *actlr_set = 0; 163935c21a1SIan Lepore 164935c21a1SIan Lepore if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) { 165935c21a1SIan Lepore switch (cpuinfo.part_number) { 166935c21a1SIan Lepore 167935c21a1SIan Lepore case CPU_ARCH_CORTEX_A17: 168935c21a1SIan Lepore case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */ 169935c21a1SIan Lepore /* 170935c21a1SIan Lepore * Enable SMP mode 171935c21a1SIan Lepore */ 172935c21a1SIan Lepore *actlr_mask = (1 << 6); 173935c21a1SIan Lepore *actlr_set = (1 << 6); 174935c21a1SIan Lepore break; 175935c21a1SIan Lepore case CPU_ARCH_CORTEX_A15: 176935c21a1SIan Lepore /* 177935c21a1SIan Lepore * Enable snoop-delayed exclusive handling 178935c21a1SIan Lepore * Enable SMP mode 179935c21a1SIan Lepore */ 180935c21a1SIan Lepore *actlr_mask = (1U << 31) |(1 << 6); 181935c21a1SIan Lepore *actlr_set = (1U << 31) |(1 << 6); 182935c21a1SIan Lepore break; 183935c21a1SIan Lepore case CPU_ARCH_CORTEX_A9: 184935c21a1SIan Lepore /* 185935c21a1SIan Lepore * Disable exclusive 
L1/L2 cache control 186935c21a1SIan Lepore * Enable SMP mode 187935c21a1SIan Lepore * Enable Cache and TLB maintenance broadcast 188935c21a1SIan Lepore */ 189935c21a1SIan Lepore *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0); 190935c21a1SIan Lepore *actlr_set = (1 << 6) | (1 << 0); 191935c21a1SIan Lepore break; 192935c21a1SIan Lepore case CPU_ARCH_CORTEX_A8: 193935c21a1SIan Lepore /* 194935c21a1SIan Lepore * Enable L2 cache 195935c21a1SIan Lepore * Enable L1 data cache hardware alias checks 196935c21a1SIan Lepore */ 197935c21a1SIan Lepore *actlr_mask = (1 << 1) | (1 << 0); 198935c21a1SIan Lepore *actlr_set = (1 << 1); 199935c21a1SIan Lepore break; 200935c21a1SIan Lepore case CPU_ARCH_CORTEX_A7: 201935c21a1SIan Lepore /* 202935c21a1SIan Lepore * Enable SMP mode 203935c21a1SIan Lepore */ 204935c21a1SIan Lepore *actlr_mask = (1 << 6); 205935c21a1SIan Lepore *actlr_set = (1 << 6); 206935c21a1SIan Lepore break; 207935c21a1SIan Lepore case CPU_ARCH_CORTEX_A5: 208935c21a1SIan Lepore /* 209935c21a1SIan Lepore * Disable exclusive L1/L2 cache control 210935c21a1SIan Lepore * Enable SMP mode 211935c21a1SIan Lepore * Enable Cache and TLB maintenance broadcast 212935c21a1SIan Lepore */ 213935c21a1SIan Lepore *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0); 214935c21a1SIan Lepore *actlr_set = (1 << 6) | (1 << 0); 215935c21a1SIan Lepore break; 216935c21a1SIan Lepore case CPU_ARCH_ARM1176: 217935c21a1SIan Lepore /* 218935c21a1SIan Lepore * Restrict cache size to 16KB 219935c21a1SIan Lepore * Enable the return stack 220935c21a1SIan Lepore * Enable dynamic branch prediction 221935c21a1SIan Lepore * Enable static branch prediction 222935c21a1SIan Lepore */ 223935c21a1SIan Lepore *actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0); 224935c21a1SIan Lepore *actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0); 225935c21a1SIan Lepore break; 226935c21a1SIan Lepore } 227935c21a1SIan Lepore return; 228935c21a1SIan Lepore } 229935c21a1SIan Lepore } 230