1 /*- 2 * Copyright (c) 2015-2016 Svatopluk Kraus 3 * Copyright (c) 2015-2016 Michal Meloun 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/conf.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/smp.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include "pic_if.h"

#ifdef SMP
/* Maximum length of an IPI name, including the terminating NUL. */
#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)

/*
 * Per-IPI bookkeeping.  One slot per IPI number; a slot is considered
 * "set up" once ii_count is non-NULL (assigned in intr_ipi_setup()).
 */
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;	/* dispatch callback */
	void *			ii_handler_arg;	/* argument for ii_handler */
	intr_ipi_send_t *	ii_send;	/* send method */
	void *			ii_send_arg;	/* argument for ii_send */
	char			ii_name[INTR_IPI_NAMELEN]; /* counter name */
	u_long *		ii_count;	/* per-CPU counters; NULL
						   until intr_ipi_setup() */
};

/* Table of all IPI sources, indexed by IPI number. */
static struct intr_ipi ipi_sources[INTR_IPI_COUNT];
#endif

/*
 * arm_irq_memory_barrier()
 *
 * Ensure all writes to device memory have reached devices before proceeding.
 *
 * This is intended to be called from the post-filter and post-thread routines
 * of an interrupt controller implementation.  A peripheral device driver
 * should use bus_space_barrier() if it needs to ensure a write has reached
 * the hardware for some reason other than clearing interrupt conditions.
 *
 * The need for this function arises from the ARM weak memory ordering model.
 * Writes to locations mapped with the Device attribute bypass any caches, but
 * are buffered.  Multiple writes to the same device will be observed by that
 * device in the order issued by the cpu.  Writes to different devices may
 * appear at those devices in a different order than issued by the cpu.  That
 * is, if the cpu writes to device A then device B, the write to device B
 * could complete before the write to device A.
 *
 * Consider a typical device interrupt handler which services the interrupt
 * and writes to a device status-acknowledge register to clear the interrupt
 * before returning.  That write is posted to the L2 controller which
 * "immediately" places it in a store buffer and automatically drains that
 * buffer.  This can be less immediate than you'd think... There may be no
 * free slots in the store buffers, so an existing buffer has to be drained
 * first to make room.  The target bus may be busy with other traffic (such
 * as DMA for various devices), delaying the drain of the store buffer for
 * some indeterminate time.  While all this delay is happening, execution
 * proceeds on the CPU, unwinding its way out of the interrupt call stack to
 * the point where the interrupt driver code is ready to EOI and unmask the
 * interrupt.  The interrupt controller may be accessed via a faster bus than
 * the hardware whose handler just ran; the write to unmask and EOI the
 * interrupt may complete quickly while the device write to ack and clear the
 * interrupt source is still lingering in a store buffer waiting for access
 * to a slower bus.  With the interrupt unmasked at the interrupt controller
 * but still active at the device, as soon as interrupts are enabled on the
 * core the device re-interrupts immediately: now you've got a spurious
 * interrupt on your hands.
 *
 * The right way to fix this problem is for every device driver to use the
 * proper bus_space_barrier() calls in its interrupt handler.  For ARM a
 * single barrier call at the end of the handler would work.  This would have
 * to be done to every driver in the system, not just arm-specific drivers.
 *
 * Another potential fix is to map all device memory as Strongly-Ordered
 * rather than Device memory, which takes the store buffers out of the
 * picture.  This has a pretty big impact on overall system performance,
 * because each strongly ordered memory access causes all L2 store buffers to
 * be drained.
 *
 * A compromise solution is to have the interrupt controller implementation
 * call this function to establish a barrier between writes to the
 * interrupt-source device and writes to the interrupt controller device.
 *
 * This takes the interrupt number as an argument, and currently doesn't use
 * it.  The plan is that maybe some day there is a way to flag certain
 * interrupts as "memory barrier safe" and we can avoid this overhead with
 * them.
 */
void
arm_irq_memory_barrier(uintptr_t irq)
{

	/* Order: core-level barrier first, then drain the L2 store buffers. */
	dsb();
	cpu_l2cache_drain_writebuf();
}

#ifdef SMP
/*
 * Return the IPI slot for the given IPI number, panicking on an
 * out-of-range index.  Never returns NULL.
 */
static inline struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 * Dispatch a received IPI to its registered handler, bumping this CPU's
 * counter for it.  Panics if the IPI was never set up.
 */
void
intr_ipi_dispatch(u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	/* ii_count is only set by intr_ipi_setup(); NULL means not set up. */
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	ii->ii_handler(ii->ii_handler_arg);
}

/*
 * Send the given IPI to the set of CPUs in 'cpus' using the send method
 * registered for it.  Panics if the IPI was never set up.
 */
void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}

/*
 * Register handler and send method for the given IPI and allocate its
 * per-CPU counters.  Each IPI slot may be set up only once (asserted
 * via ii_count below).
 */
void
intr_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *h_arg, intr_ipi_send_t *send, void *s_arg)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);

	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));
	KASSERT(send != NULL, ("%s: ipi %u no sender", __func__, ipi));
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = h_arg;
	ii->ii_send = send;
	ii->ii_send_arg = s_arg;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	/* Assign ii_count last: it is the "slot is set up" marker. */
	ii->ii_count = intr_ipi_setup_counters(name);
}

/*
 * Send IPI thru interrupt controller.
 *
 * Default send method registered by intr_pic_ipi_setup(); 'arg' is the
 * interrupt source passed as s_arg there.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 * Setup IPI handler on interrupt controller.
 *
 * Asks the root PIC for an interrupt source for the IPI, registers 'hand'
 * for it via intr_ipi_setup() with pic_ipi_send() as the send method, and
 * enables the interrupt.  Returns 0 on success or the PIC_IPI_SETUP error.
 *
 * Not SMP coherent.
 */
int
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	int error;
	struct intr_irqsrc *isrc;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return (error);

	isrc->isrc_handlers++;
	intr_ipi_setup(ipi, name, hand, arg, pic_ipi_send, isrc);
	PIC_ENABLE_INTR(intr_irq_root_dev, isrc);
	return (0);
}
#endif