/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 34 #include <machine/cpu.h> 35 #include <machine/cpuinfo.h> 36 37 struct cpuinfo cpuinfo = 38 { 39 /* Use safe defaults for start */ 40 .dcache_line_size = 32, 41 .dcache_line_mask = 31, 42 .icache_line_size = 32, 43 .icache_line_mask = 31, 44 }; 45 46 /* Read and parse CPU id scheme */ 47 void 48 cpuinfo_init(void) 49 { 50 51 cpuinfo.midr = cp15_midr_get(); 52 /* Test old version id schemes first */ 53 if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) { 54 if (CPU_ID_ISOLD(cpuinfo.midr)) { 55 /* obsolete ARMv2 or ARMv3 CPU */ 56 cpuinfo.midr = 0; 57 return; 58 } 59 if (CPU_ID_IS7(cpuinfo.midr)) { 60 if ((cpuinfo.midr & (1 << 23)) == 0) { 61 /* obsolete ARMv3 CPU */ 62 cpuinfo.midr = 0; 63 return; 64 } 65 /* ARMv4T CPU */ 66 cpuinfo.architecture = 1; 67 cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F; 68 } else { 69 /* ARM new id scheme */ 70 cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F; 71 cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F; 72 } 73 } else { 74 /* non ARM -> must be new id scheme */ 75 cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F; 76 cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F; 77 } 78 /* Parse rest of MIDR */ 79 cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF; 80 cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF; 81 cpuinfo.patch = cpuinfo.midr & 0x0F; 82 83 /* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */ 84 cpuinfo.ctr = cp15_ctr_get(); 85 cpuinfo.tcmtr = cp15_tcmtr_get(); 86 #if __ARM_ARCH >= 6 87 cpuinfo.tlbtr = cp15_tlbtr_get(); 88 cpuinfo.mpidr = cp15_mpidr_get(); 89 cpuinfo.revidr = cp15_revidr_get(); 90 #endif 91 92 /* if CPU is not v7 cpu id scheme */ 93 if (cpuinfo.architecture != 0xF) 94 return; 95 #if __ARM_ARCH >= 6 96 cpuinfo.id_pfr0 = cp15_id_pfr0_get(); 97 cpuinfo.id_pfr1 = cp15_id_pfr1_get(); 98 cpuinfo.id_dfr0 = cp15_id_dfr0_get(); 99 cpuinfo.id_afr0 = 
cp15_id_afr0_get(); 100 cpuinfo.id_mmfr0 = cp15_id_mmfr0_get(); 101 cpuinfo.id_mmfr1 = cp15_id_mmfr1_get(); 102 cpuinfo.id_mmfr2 = cp15_id_mmfr2_get(); 103 cpuinfo.id_mmfr3 = cp15_id_mmfr3_get(); 104 cpuinfo.id_isar0 = cp15_id_isar0_get(); 105 cpuinfo.id_isar1 = cp15_id_isar1_get(); 106 cpuinfo.id_isar2 = cp15_id_isar2_get(); 107 cpuinfo.id_isar3 = cp15_id_isar3_get(); 108 cpuinfo.id_isar4 = cp15_id_isar4_get(); 109 cpuinfo.id_isar5 = cp15_id_isar5_get(); 110 111 /* Not yet - CBAR only exist on ARM SMP Cortex A CPUs 112 cpuinfo.cbar = cp15_cbar_get(); 113 */ 114 if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) { 115 cpuinfo.ccsidr = cp15_ccsidr_get(); 116 cpuinfo.clidr = cp15_clidr_get(); 117 } 118 119 /* Test if revidr is implemented */ 120 if (cpuinfo.revidr == cpuinfo.midr) 121 cpuinfo.revidr = 0; 122 123 /* parsed bits of above registers */ 124 /* id_mmfr0 */ 125 cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF; 126 cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF; 127 cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF; 128 cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF; 129 /* id_mmfr2 */ 130 cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF; 131 /* id_mmfr3 */ 132 cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF; 133 cpuinfo.maintenance_broadcast =(cpuinfo.id_mmfr3 >> 12) & 0xF; 134 /* id_pfr1 */ 135 cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF; 136 cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF; 137 cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF; 138 /* mpidr */ 139 cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1; 140 141 /* L1 Cache sizes */ 142 if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) { 143 cpuinfo.dcache_line_size = 144 1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2); 145 cpuinfo.icache_line_size = 146 1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2); 147 } else { 148 cpuinfo.dcache_line_size = 149 1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3); 150 
cpuinfo.icache_line_size = 151 1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3); 152 } 153 cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1; 154 cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1; 155 #endif 156 } 157 158 /* 159 * Get bits that must be set or cleared in ACLR register. 160 * Note: Bits in ACLR register are IMPLEMENTATION DEFINED. 161 * Its expected that SCU is in operational state before this 162 * function is called. 163 */ 164 void 165 cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set) 166 { 167 *actlr_mask = 0; 168 *actlr_set = 0; 169 170 if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) { 171 switch (cpuinfo.part_number) { 172 case CPU_ARCH_CORTEX_A73: 173 case CPU_ARCH_CORTEX_A72: 174 case CPU_ARCH_CORTEX_A57: 175 case CPU_ARCH_CORTEX_A53: 176 /* Nothing to do for AArch32 */ 177 break; 178 case CPU_ARCH_CORTEX_A17: 179 case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */ 180 /* 181 * Enable SMP mode 182 */ 183 *actlr_mask = (1 << 6); 184 *actlr_set = (1 << 6); 185 break; 186 case CPU_ARCH_CORTEX_A15: 187 /* 188 * Enable snoop-delayed exclusive handling 189 * Enable SMP mode 190 */ 191 *actlr_mask = (1U << 31) |(1 << 6); 192 *actlr_set = (1U << 31) |(1 << 6); 193 break; 194 case CPU_ARCH_CORTEX_A9: 195 /* 196 * Disable exclusive L1/L2 cache control 197 * Enable SMP mode 198 * Enable Cache and TLB maintenance broadcast 199 */ 200 *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0); 201 *actlr_set = (1 << 6) | (1 << 0); 202 break; 203 case CPU_ARCH_CORTEX_A8: 204 /* 205 * Enable L2 cache 206 * Enable L1 data cache hardware alias checks 207 */ 208 *actlr_mask = (1 << 1) | (1 << 0); 209 *actlr_set = (1 << 1); 210 break; 211 case CPU_ARCH_CORTEX_A7: 212 /* 213 * Enable SMP mode 214 */ 215 *actlr_mask = (1 << 6); 216 *actlr_set = (1 << 6); 217 break; 218 case CPU_ARCH_CORTEX_A5: 219 /* 220 * Disable exclusive L1/L2 cache control 221 * Enable SMP mode 222 * Enable Cache and TLB maintenance broadcast 223 */ 224 *actlr_mask = 
(1 << 7) | (1 << 6) | (1 << 0); 225 *actlr_set = (1 << 6) | (1 << 0); 226 break; 227 case CPU_ARCH_ARM1176: 228 /* 229 * Restrict cache size to 16KB 230 * Enable the return stack 231 * Enable dynamic branch prediction 232 * Enable static branch prediction 233 */ 234 *actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0); 235 *actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0); 236 break; 237 } 238 return; 239 } 240 } 241