/*
 * NMI watchdog for i386, driven by the CPU performance counters.
 *
 * A performance counter is programmed to count core clock cycles and is
 * preloaded with a negative value, so that it overflows after roughly a
 * fixed wall-clock interval; the overflow is delivered as an NMI through
 * the local APIC performance-counter LVT entry.  Separate back-ends exist
 * for Intel (architectural performance monitoring) and AMD CPUs.
 */
#include "kernel/kernel.h"
#include "kernel/watchdog.h"
#include "arch_proto.h"
#include "glo.h"
#include <minix/minlib.h>
#include <minix/u64.h>

#include "apic.h"

/*
 * Bit position probed in CPUID leaf 0xA EBX; arch_watchdog_init() treats a
 * set bit as "unhalted-core-cycles event not available" and bails out.
 */
#define CPUID_UNHALTED_CORE_CYCLES_AVAILABLE 0

/*
 * Intel architecture performance counters watchdog
 */

/* Back-end operation tables, defined at the bottom of this file. */
static struct arch_watchdog intel_arch_watchdog;
static struct arch_watchdog amd_watchdog;

/*
 * Program Intel performance counter 0 as the watchdog tick source for the
 * given CPU: select the core-cycles event, preload the counter with a
 * negative reset value, enable it, and unmask the perf-counter NMI in the
 * local APIC.
 */
static void intel_arch_watchdog_init(const unsigned cpu)
{
	u64_t cpuf;
	u32_t val;

	/* Clear the counter before (re)programming it. */
	ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, 0);

	/* Int (bit 20), OS (bit 17), USR (bit 16), event 0x3c = core cycles */
	val = 1 << 20 | 1 << 17 | 1 << 16 | 0x3c;
	ia32_msr_write(INTEL_MSR_PERFMON_SEL0, 0, val);

	/*
	 * should give as a tick approx. every 0.5-1s, the perf counter has only
	 * lowest 31 bits writable :(
	 */
	cpuf = cpu_get_freq(cpu);
	/* Halve until the value fits in the 31 writable counter bits. */
	while (ex64hi(cpuf) || ex64lo(cpuf) > 0x7fffffffU)
		cpuf /= 2;
	/*
	 * Negate the low word so the counter starts below zero and the
	 * overflow (which raises the NMI) fires after ~cpuf cycles.
	 */
	cpuf = make64(-ex64lo(cpuf), ex64hi(cpuf));
	watchdog->resetval = watchdog->watchdog_resetval = cpuf;

	ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, ex64lo(cpuf));

	/* Start counting only after the reset value is in place. */
	ia32_msr_write(INTEL_MSR_PERFMON_SEL0, 0,
			val | INTEL_MSR_PERFMON_SEL0_ENABLE);

	/* unmask the performance counter interrupt */
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
}

/*
 * Re-arm the Intel watchdog after an NMI: unmask the LVT entry again and
 * reload the counter with the saved reset value.  (The cpu argument is
 * unused; the MSR write affects the CPU executing this code.)
 */
static void intel_arch_watchdog_reinit(const unsigned cpu)
{
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
	ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, ex64lo(watchdog->resetval));
}

/*
 * Detect a usable performance-counter watchdog on the boot CPU and select
 * the matching back-end.  Returns 0 on success, -1 if the local APIC is
 * not enabled or the CPU lacks the required counters.
 */
int arch_watchdog_init(void)
{
	u32_t eax, ebx, ecx, edx;
	unsigned cpu = cpuid;

	if (!lapic_addr) {
		printf("ERROR : Cannot use NMI watchdog if APIC is not enabled\n");
		return -1;
	}

	if (cpu_info[cpu].vendor == CPU_VENDOR_INTEL) {
		/* Query architectural performance monitoring (CPUID leaf 0xA). */
		eax = 0xA;

		_cpuid(&eax, &ebx, &ecx, &edx);

		/* FIXME currently we support only watchdog based on the intel
		 * architectural performance counters. Some Intel CPUs don't have this
		 * feature
		 */
		/* A set EBX bit means the core-cycles event is NOT available. */
		if (ebx & (1 << CPUID_UNHALTED_CORE_CYCLES_AVAILABLE))
			return -1;
		/* EAX bits 15:8 must report at least one general-purpose counter. */
		if (!((((eax >> 8)) & 0xff) > 0))
			return -1;

		watchdog = &intel_arch_watchdog;
	} else if (cpu_info[cpu].vendor == CPU_VENDOR_AMD) {
		/* Only these AMD families are known to have the counters we use. */
		if (cpu_info[cpu].family != 6 &&
				cpu_info[cpu].family != 15 &&
				cpu_info[cpu].family != 16 &&
				cpu_info[cpu].family != 17)
			return -1;
		else
			watchdog = &amd_watchdog;
	} else
		return -1;

	/* Setup PC overflow as NMI for watchdog, it is masked for now */
	lapic_write(LAPIC_LVTPCR, APIC_ICR_INT_MASK | APIC_ICR_DM_NMI);
	(void) lapic_read(LAPIC_LVTPCR);

	/* double check if LAPIC is enabled */
	if (lapic_addr && watchdog->init) {
		watchdog->init(cpuid);
	}

	return 0;
}

/* Stopping is a no-op; the counter simply keeps running. */
void arch_watchdog_stop(void)
{
}

/*
 * Called when the watchdog NMI decides the kernel is locked up: dump the
 * interrupted register state from the NMI frame and panic.
 */
void arch_watchdog_lockup(const struct nmi_frame * frame)
{
	printf("KERNEL LOCK UP\n"
			"eax    0x%08x\n"
			"ecx    0x%08x\n"
			"edx    0x%08x\n"
			"ebx    0x%08x\n"
			"ebp    0x%08x\n"
			"esi    0x%08x\n"
			"edi    0x%08x\n"
			"gs     0x%08x\n"
			"fs     0x%08x\n"
			"es     0x%08x\n"
			"ds     0x%08x\n"
			"pc     0x%08x\n"
			"cs     0x%08x\n"
			"eflags 0x%08x\n",
			frame->eax,
			frame->ecx,
			frame->edx,
			frame->ebx,
			frame->ebp,
			frame->esi,
			frame->edi,
			frame->gs,
			frame->fs,
			frame->es,
			frame->ds,
			frame->pc,
			frame->cs,
			frame->eflags
			);
	panic("Kernel lockup");
}

/*
 * Public entry point: initialize the watchdog and report the outcome.
 * On failure the global watchdog_enabled flag is cleared and -1 returned.
 */
int i386_watchdog_start(void)
{
	if (arch_watchdog_init()) {
		printf("WARNING watchdog initialization "
				"failed! Disabled\n");
		watchdog_enabled = 0;
		return -1;
	}
	else
		BOOT_VERBOSE(printf("Watchdog enabled\n"););

	return 0;
}

/*
 * Compute the (negative) counter reset value that makes the Intel counter
 * overflow 'freq' times per second, for profiling use.  Returns OK, or
 * EINVAL if the interval does not fit in the 31 writable counter bits.
 */
static int intel_arch_watchdog_profile_init(const unsigned freq)
{
	u64_t cpuf;

	/* FIXME works only if all CPUs have the same freq */
	cpuf = cpu_get_freq(cpuid);
	cpuf /= freq;

	/*
	 * if freq is too low and the cpu freq too high we may get in a range of
	 * insane value which cannot be handled by the 31bit CPU perf counter
	 */
	if (ex64hi(cpuf) != 0 || ex64lo(cpuf) > 0x7fffffffU) {
		printf("ERROR : nmi watchdog ticks exceed 31bits, use higher frequency\n");
		return EINVAL;
	}

	/* Negate so the counter overflows after 'cpuf' cycles. */
	cpuf = make64(-ex64lo(cpuf), ex64hi(cpuf));
	watchdog->profile_resetval = cpuf;

	return OK;
}

/* Intel back-end operations. */
static struct arch_watchdog intel_arch_watchdog = {
	/*.init = */		intel_arch_watchdog_init,
	/*.reinit = */		intel_arch_watchdog_reinit,
	/*.profile_init = */	intel_arch_watchdog_profile_init
};

/* AMD performance-event-select / counter MSRs (counter 0). */
#define AMD_MSR_EVENT_SEL0		0xc0010000
#define AMD_MSR_EVENT_CTR0		0xc0010004
#define AMD_MSR_EVENT_SEL0_ENABLE	(1 << 22)

/*
 * AMD counterpart of intel_arch_watchdog_init(): program AMD event
 * counter 0 to count running cycles and raise an NMI on overflow.
 * Unlike the Intel path, the full 64-bit negated frequency is written,
 * so no 31-bit clamping loop is needed here.
 */
static void amd_watchdog_init(const unsigned cpu)
{
	u64_t cpuf;
	u32_t val;

	/* Clear the counter before (re)programming it. */
	ia32_msr_write(AMD_MSR_EVENT_CTR0, 0, 0);

	/* Int (bit 20), OS (bit 17), USR (bit 16), event 0x76 = CPU running cycles */
	val = 1 << 20 | 1 << 17 | 1 << 16 | 0x76;
	ia32_msr_write(AMD_MSR_EVENT_SEL0, 0, val);

	/* Preload with -freq so the counter overflows about once per second. */
	cpuf = -cpu_get_freq(cpu);
	watchdog->resetval = watchdog->watchdog_resetval = cpuf;

	ia32_msr_write(AMD_MSR_EVENT_CTR0,
			ex64hi(watchdog->resetval), ex64lo(watchdog->resetval));

	/* Start counting only after the reset value is in place. */
	ia32_msr_write(AMD_MSR_EVENT_SEL0, 0,
			val | AMD_MSR_EVENT_SEL0_ENABLE);

	/* unmask the performance counter interrupt */
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
}

/*
 * Re-arm the AMD watchdog after an NMI: unmask the LVT entry and reload
 * the counter with the saved reset value.  (cpu argument unused.)
 */
static void amd_watchdog_reinit(const unsigned cpu)
{
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
	ia32_msr_write(AMD_MSR_EVENT_CTR0,
			ex64hi(watchdog->resetval), ex64lo(watchdog->resetval));
}

/*
 * Compute the negated per-tick cycle count for profiling at 'freq' Hz on
 * AMD.  The full 64-bit value is usable, so this cannot fail; always
 * returns OK.
 */
static int amd_watchdog_profile_init(const unsigned freq)
{
	u64_t cpuf;

	/* FIXME works only if all CPUs have the same freq */
	cpuf = cpu_get_freq(cpuid);
	cpuf = -cpuf / freq;

	watchdog->profile_resetval = cpuf;

	return OK;
}

/* AMD back-end operations. */
static struct arch_watchdog amd_watchdog = {
	/*.init = */		amd_watchdog_init,
	/*.reinit = */		amd_watchdog_reinit,
	/*.profile_init = */	amd_watchdog_profile_init
};