1 /*- 2 * Copyright (c) 2015-2016 Svatopluk Kraus 3 * Copyright (c) 2015-2016 Michal Meloun 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 */

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/conf.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include "pic_if.h"

#ifdef SMP
#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)

/*
 * Per-IPI bookkeeping: the registered handler, the PIC-specific send
 * method, and the per-CPU statistics counters for this IPI.
 */
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;	/* dispatch callback */
	void *			ii_handler_arg;	/* argument passed to ii_handler */
	intr_ipi_send_t *	ii_send;	/* method used to send this IPI */
	void *			ii_send_arg;	/* argument passed to ii_send */
	char			ii_name[INTR_IPI_NAMELEN]; /* counter name */
	u_long *		ii_count;	/* counters; NULL => not set up */
};

/* One slot per IPI number; slots are claimed by intr_ipi_setup(). */
static struct intr_ipi ipi_sources[INTR_IPI_COUNT];
#endif

/*
 * arm_irq_memory_barrier()
 *
 * Ensure all writes to device memory have reached devices before proceeding.
 *
 * This is intended to be called from the post-filter and post-thread routines
 * of an interrupt controller implementation. A peripheral device driver should
 * use bus_space_barrier() if it needs to ensure a write has reached the
 * hardware for some reason other than clearing interrupt conditions.
 *
 * The need for this function arises from the ARM weak memory ordering model.
 * Writes to locations mapped with the Device attribute bypass any caches, but
 * are buffered. Multiple writes to the same device will be observed by that
 * device in the order issued by the cpu. Writes to different devices may
 * appear at those devices in a different order than issued by the cpu. That
 * is, if the cpu writes to device A then device B, the write to device B could
 * complete before the write to device A.
83 * 84 * Consider a typical device interrupt handler which services the interrupt and 85 * writes to a device status-acknowledge register to clear the interrupt before 86 * returning. That write is posted to the L2 controller which "immediately" 87 * places it in a store buffer and automatically drains that buffer. This can 88 * be less immediate than you'd think... There may be no free slots in the store 89 * buffers, so an existing buffer has to be drained first to make room. The 90 * target bus may be busy with other traffic (such as DMA for various devices), 91 * delaying the drain of the store buffer for some indeterminate time. While 92 * all this delay is happening, execution proceeds on the CPU, unwinding its way 93 * out of the interrupt call stack to the point where the interrupt driver code 94 * is ready to EOI and unmask the interrupt. The interrupt controller may be 95 * accessed via a faster bus than the hardware whose handler just ran; the write 96 * to unmask and EOI the interrupt may complete quickly while the device write 97 * to ack and clear the interrupt source is still lingering in a store buffer 98 * waiting for access to a slower bus. With the interrupt unmasked at the 99 * interrupt controller but still active at the device, as soon as interrupts 100 * are enabled on the core the device re-interrupts immediately: now you've got 101 * a spurious interrupt on your hands. 102 * 103 * The right way to fix this problem is for every device driver to use the 104 * proper bus_space_barrier() calls in its interrupt handler. For ARM a single 105 * barrier call at the end of the handler would work. This would have to be 106 * done to every driver in the system, not just arm-specific drivers. 107 * 108 * Another potential fix is to map all device memory as Strongly-Ordered rather 109 * than Device memory, which takes the store buffers out of the picture. 
This 110 * has a pretty big impact on overall system performance, because each strongly 111 * ordered memory access causes all L2 store buffers to be drained. 112 * 113 * A compromise solution is to have the interrupt controller implementation call 114 * this function to establish a barrier between writes to the interrupt-source 115 * device and writes to the interrupt controller device. 116 * 117 * This takes the interrupt number as an argument, and currently doesn't use it. 118 * The plan is that maybe some day there is a way to flag certain interrupts as 119 * "memory barrier safe" and we can avoid this overhead with them. 120 */ 121 void 122 arm_irq_memory_barrier(uintptr_t irq) 123 { 124 125 dsb(); 126 cpu_l2cache_drain_writebuf(); 127 } 128 129 #ifdef SMP 130 static inline struct intr_ipi * 131 intr_ipi_lookup(u_int ipi) 132 { 133 134 if (ipi >= INTR_IPI_COUNT) 135 panic("%s: no such IPI %u", __func__, ipi); 136 137 return (&ipi_sources[ipi]); 138 } 139 140 void 141 intr_ipi_dispatch(u_int ipi) 142 { 143 struct intr_ipi *ii; 144 145 ii = intr_ipi_lookup(ipi); 146 if (ii->ii_count == NULL) 147 panic("%s: not setup IPI %u", __func__, ipi); 148 149 intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid)); 150 151 ii->ii_handler(ii->ii_handler_arg); 152 } 153 154 void 155 intr_ipi_send(cpuset_t cpus, u_int ipi) 156 { 157 struct intr_ipi *ii; 158 159 ii = intr_ipi_lookup(ipi); 160 if (ii->ii_count == NULL) 161 panic("%s: not setup IPI %u", __func__, ipi); 162 163 ii->ii_send(ii->ii_send_arg, cpus, ipi); 164 } 165 166 void 167 intr_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, 168 void *h_arg, intr_ipi_send_t *send, void *s_arg) 169 { 170 struct intr_ipi *ii; 171 172 ii = intr_ipi_lookup(ipi); 173 174 KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi)); 175 KASSERT(send != NULL, ("%s: ipi %u no sender", __func__, ipi)); 176 KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi)); 177 178 ii->ii_handler = hand; 179 
ii->ii_handler_arg = h_arg; 180 ii->ii_send = send; 181 ii->ii_send_arg = s_arg; 182 strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN); 183 ii->ii_count = intr_ipi_setup_counters(name); 184 } 185 186 /* 187 * Send IPI thru interrupt controller. 188 */ 189 static void 190 pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi) 191 { 192 193 KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); 194 PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi); 195 } 196 197 /* 198 * Setup IPI handler on interrupt controller. 199 * 200 * Not SMP coherent. 201 */ 202 int 203 intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand, 204 void *arg) 205 { 206 int error; 207 struct intr_irqsrc *isrc; 208 209 KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__)); 210 211 error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc); 212 if (error != 0) 213 return (error); 214 215 isrc->isrc_handlers++; 216 intr_ipi_setup(ipi, name, hand, arg, pic_ipi_send, isrc); 217 PIC_ENABLE_INTR(intr_irq_root_dev, isrc); 218 return (0); 219 } 220 #endif 221