1 /*
2  *  ioapic.c IOAPIC emulation logic
3  *
4  *  Copyright (c) 2004-2005 Fabrice Bellard
5  *
6  *  Split the ioapic logic from apic.c
7  *  Xiantao Zhang <xiantao.zhang@intel.com>
8  *
9  * This library is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2 of the License, or (at your option) any later version.
13  *
14  * This library is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21  */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "monitor/monitor.h"
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/ioapic.h"
#include "hw/i386/ioapic_internal.h"
#include "hw/pci/msi.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/x86-iommu.h"
#include "trace.h"

#define APIC_DELIVERY_MODE_SHIFT 8
#define APIC_POLARITY_SHIFT 14
#define APIC_TRIG_MODE_SHIFT 15

static IOAPICCommonState *ioapics[MAX_IOAPICS];

/* global variable from ioapic_common.c */
extern int ioapic_no;

struct ioapic_entry_info {
    /* fields parsed from IOAPIC entries */
    uint8_t masked;
    uint8_t trig_mode;
    uint16_t dest_idx;
    uint8_t dest_mode;
    uint8_t delivery_mode;
    uint8_t vector;

    /* MSI message generated from above parsed fields */
    uint32_t addr;
    uint32_t data;
};

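/*
 * Decode one I/O redirection table entry and precompute the MSI
 * address/data pair that would deliver it, so callers can either write
 * it into the APIC MSI address space or program a KVM route with it.
 */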
static void ioapic_entry_parse(uint64_t entry, struct ioapic_entry_info *info)
{
    memset(info, 0, sizeof(*info));
    info->masked = (entry >> IOAPIC_LVT_MASKED_SHIFT) & 1;
    info->trig_mode = (entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1;
    /*
     * Without interrupt remapping this field is dest_id[8] +
     * reserved[8]; with IR enabled it is interrupt_index[15] +
     * interrupt_format[1].  We never interpret it here, it is only
     * used to generate the corresponding MSI.
     */
    info->dest_idx = (entry >> IOAPIC_LVT_DEST_IDX_SHIFT) & 0xffff;
    info->dest_mode = (entry >> IOAPIC_LVT_DEST_MODE_SHIFT) & 1;
    info->delivery_mode = (entry >> IOAPIC_LVT_DELIV_MODE_SHIFT)
        & IOAPIC_DM_MASK;
    if (info->delivery_mode == IOAPIC_DM_EXTINT) {
        info->vector = pic_read_irq(isa_pic);
    } else {
        info->vector = entry & IOAPIC_VECTOR_MASK;
    }

    info->addr = APIC_DEFAULT_ADDRESS |
        (info->dest_idx << MSI_ADDR_DEST_IDX_SHIFT) |
        (info->dest_mode << MSI_ADDR_DEST_MODE_SHIFT);
    info->data = (info->vector << MSI_DATA_VECTOR_SHIFT) |
        (info->trig_mode << MSI_DATA_TRIGGER_SHIFT) |
        (info->delivery_mode << MSI_DATA_DELIVERY_MODE_SHIFT);
}

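/*
 * Walk all pins and deliver any interrupt whose IRR bit is set and whose
 * redirection entry is not masked.  Edge interrupts clear IRR on delivery;
 * level interrupts set Remote IRR and are skipped while the guest is still
 * servicing the previous assertion.
 */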
static void ioapic_service(IOAPICCommonState *s)
{
    AddressSpace *ioapic_as = PC_MACHINE(qdev_get_machine())->ioapic_as;
    struct ioapic_entry_info info;
    uint8_t i;
    uint32_t mask;
    uint64_t entry;

    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
        mask = 1 << i;
        if (s->irr & mask) {
            int coalesce = 0;

            entry = s->ioredtbl[i];
            ioapic_entry_parse(entry, &info);
            if (!info.masked) {
                if (info.trig_mode == IOAPIC_TRIGGER_EDGE) {
                    s->irr &= ~mask;
                } else {
                    coalesce = s->ioredtbl[i] & IOAPIC_LVT_REMOTE_IRR;
                    trace_ioapic_set_remote_irr(i);
                    s->ioredtbl[i] |= IOAPIC_LVT_REMOTE_IRR;
                }

                if (coalesce) {
                    /* This is a level-triggered interrupt and the guest
                     * is still handling the previous assertion, so skip
                     * it. */
                    continue;
                }

#ifdef CONFIG_KVM
                if (kvm_irqchip_is_split()) {
                    if (info.trig_mode == IOAPIC_TRIGGER_EDGE) {
                        kvm_set_irq(kvm_state, i, 1);
                        kvm_set_irq(kvm_state, i, 0);
                    } else {
                        kvm_set_irq(kvm_state, i, 1);
                    }
                    continue;
                }
#endif

                /* Whether or not IR is enabled, translate the IOAPIC
                 * message into an MSI; the target address space decides
                 * whether a translation is needed. */
                stl_le_phys(ioapic_as, info.addr, info.data);
            }
        }
    }
}

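/*
 * If a level-triggered interrupt is re-raised this many times in a row
 * right after its EOI, treat it as an interrupt storm and delay the
 * reassertion instead (see ioapic_eoi_broadcast() below).
 */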
#define SUCCESSIVE_IRQ_MAX_COUNT 10000

static void delayed_ioapic_service_cb(void *opaque)
{
    IOAPICCommonState *s = opaque;

    ioapic_service(s);
}

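/*
 * GPIO input handler: a GSI line changed level.  Latch the new level into
 * IRR according to the pin's trigger mode and deliver it if appropriate.
 */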
static void ioapic_set_irq(void *opaque, int vector, int level)
{
    IOAPICCommonState *s = opaque;

    /* ISA IRQs map to GSIs 1-1 except for IRQ0, which maps to GSI 2.
     * GSIs map to IOAPIC pins 1-1.  This is not the cleanest way of
     * doing it but it should work. */

    trace_ioapic_set_irq(vector, level);
    ioapic_stat_update_irq(s, vector, level);
    if (vector == 0) {
        vector = 2;
    }
    if (vector < IOAPIC_NUM_PINS) {
        uint32_t mask = 1 << vector;
        uint64_t entry = s->ioredtbl[vector];

        if (((entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1) ==
            IOAPIC_TRIGGER_LEVEL) {
            /* level triggered */
            if (level) {
                s->irr |= mask;
                if (!(entry & IOAPIC_LVT_REMOTE_IRR)) {
                    ioapic_service(s);
                }
            } else {
                s->irr &= ~mask;
            }
        } else {
            /* According to the 82093AA manual, we must ignore edge requests
             * if the input pin is masked. */
            if (level && !(entry & IOAPIC_LVT_MASKED)) {
                s->irr |= mask;
                ioapic_service(s);
            }
        }
    }
}

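/*
 * With a split irqchip, each IOAPIC pin is backed by a KVM MSI route.
 * Regenerate all routes from the current redirection table and commit
 * them to the kernel.
 */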
static void ioapic_update_kvm_routes(IOAPICCommonState *s)
{
#ifdef CONFIG_KVM
    int i;

    if (kvm_irqchip_is_split()) {
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            MSIMessage msg;
            struct ioapic_entry_info info;
            ioapic_entry_parse(s->ioredtbl[i], &info);
            msg.address = info.addr;
            msg.data = info.data;
            kvm_irqchip_update_msi_route(kvm_state, i, msg, NULL);
        }
        kvm_irqchip_commit_routes(kvm_state);
    }
#endif
}

#ifdef CONFIG_KVM
static void ioapic_iec_notifier(void *private, bool global,
                                uint32_t index, uint32_t mask)
{
    IOAPICCommonState *s = (IOAPICCommonState *)private;
    /* For simplicity, we just update all the routes */
    ioapic_update_kvm_routes(s);
}
#endif

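/*
 * Called by the local APIC emulation when a level-triggered vector is
 * EOI'd: clear Remote IRR on every matching redirection entry and
 * re-deliver the interrupt if the line is still asserted.
 */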
void ioapic_eoi_broadcast(int vector)
{
    IOAPICCommonState *s;
    uint64_t entry;
    int i, n;

    trace_ioapic_eoi_broadcast(vector);

    for (i = 0; i < MAX_IOAPICS; i++) {
        s = ioapics[i];
        if (!s) {
            continue;
        }
        for (n = 0; n < IOAPIC_NUM_PINS; n++) {
            entry = s->ioredtbl[n];

            if ((entry & IOAPIC_VECTOR_MASK) != vector ||
                ((entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1) != IOAPIC_TRIGGER_LEVEL) {
                continue;
            }

            if (!(entry & IOAPIC_LVT_REMOTE_IRR)) {
                continue;
            }

            trace_ioapic_clear_remote_irr(n, vector);
            s->ioredtbl[n] = entry & ~IOAPIC_LVT_REMOTE_IRR;

            if (!(entry & IOAPIC_LVT_MASKED) && (s->irr & (1 << n))) {
                ++s->irq_eoi[vector];
                if (s->irq_eoi[vector] >= SUCCESSIVE_IRQ_MAX_COUNT) {
                    /*
                     * Real hardware does not deliver the interrupt immediately
                     * during eoi broadcast, and this lets a buggy guest make
                     * slow progress even if it does not correctly handle a
                     * level-triggered interrupt. Emulate this behavior if we
                     * detect an interrupt storm.
                     */
                    s->irq_eoi[vector] = 0;
                    timer_mod_anticipate(s->delayed_ioapic_service_timer,
                                         qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                                         NANOSECONDS_PER_SECOND / 100);
                    trace_ioapic_eoi_delayed_reassert(vector);
                } else {
                    ioapic_service(s);
                }
            } else {
                s->irq_eoi[vector] = 0;
            }
        }
    }
}

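/*
 * Indirect register access: the guest writes a register index to IOREGSEL
 * and then reads or writes the selected 32-bit value through IOWIN.  Each
 * 64-bit redirection table entry occupies two consecutive indices starting
 * at IOAPIC_REG_REDTBL_BASE, with bit 0 of the index selecting the high
 * word.
 *
 * Example (guest view, assuming the standard 82093AA layout with IOREGSEL
 * at offset 0x00, IOWIN at offset 0x10 and the redirection table starting
 * at index 0x10): to read entry 2, write 0x14 to IOREGSEL and read IOWIN
 * for the low 32 bits, then write 0x15 and read IOWIN again for the high
 * 32 bits.
 */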
static uint64_t
ioapic_mem_read(void *opaque, hwaddr addr, unsigned int size)
{
    IOAPICCommonState *s = opaque;
    int index;
    uint32_t val = 0;

    addr &= 0xff;

    switch (addr) {
    case IOAPIC_IOREGSEL:
        val = s->ioregsel;
        break;
    case IOAPIC_IOWIN:
        if (size != 4) {
            break;
        }
        switch (s->ioregsel) {
        case IOAPIC_REG_ID:
        case IOAPIC_REG_ARB:
            val = s->id << IOAPIC_ID_SHIFT;
            break;
        case IOAPIC_REG_VER:
            val = s->version |
                ((IOAPIC_NUM_PINS - 1) << IOAPIC_VER_ENTRIES_SHIFT);
            break;
        default:
            index = (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1;
            if (index >= 0 && index < IOAPIC_NUM_PINS) {
                if (s->ioregsel & 1) {
                    val = s->ioredtbl[index] >> 32;
                } else {
                    val = s->ioredtbl[index] & 0xffffffff;
                }
            }
        }
        break;
    }

    trace_ioapic_mem_read(addr, s->ioregsel, size, val);

    return val;
}

/*
 * This satisfies a hack in the Linux kernel, which simulates clearing
 * the Remote IRR bit of an IOAPIC entry as follows:
 *
 * "For IO-APIC's with EOI register, we use that to do an explicit EOI.
 * Otherwise, we simulate the EOI message manually by changing the trigger
 * mode to edge and then back to level, with RTE being masked during
 * this."
 *
 * (See the __eoi_ioapic_pin() comment in Linux kernel commit c0205701.)
 *
 * It relies on the assumption that the Remote IRR bit is cleared by
 * IOAPIC hardware when an entry is configured as edge-triggered.
 *
 * Without this, level-triggered interrupts in IR mode might fail to
 * work correctly.
 */
static inline void
ioapic_fix_edge_remote_irr(uint64_t *entry)
{
    if (!(*entry & IOAPIC_LVT_TRIGGER_MODE)) {
        /* Edge-triggered interrupts, make sure remote IRR is zero */
        *entry &= ~((uint64_t)IOAPIC_LVT_REMOTE_IRR);
    }
}

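/*
 * MMIO write handler: IOREGSEL selects a register and IOWIN writes it,
 * preserving the read-only bits of redirection entries; the EOI register
 * is honoured only for IOAPIC version 0x20.  Any write may change pending
 * interrupt routing, so the KVM routes are refreshed afterwards.
 */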
static void
ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val,
                 unsigned int size)
{
    IOAPICCommonState *s = opaque;
    int index;

    addr &= 0xff;
    trace_ioapic_mem_write(addr, s->ioregsel, size, val);

    switch (addr) {
    case IOAPIC_IOREGSEL:
        s->ioregsel = val;
        break;
    case IOAPIC_IOWIN:
        if (size != 4) {
            break;
        }
        switch (s->ioregsel) {
        case IOAPIC_REG_ID:
            s->id = (val >> IOAPIC_ID_SHIFT) & IOAPIC_ID_MASK;
            break;
        case IOAPIC_REG_VER:
        case IOAPIC_REG_ARB:
            break;
        default:
            index = (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1;
            if (index >= 0 && index < IOAPIC_NUM_PINS) {
                uint64_t ro_bits = s->ioredtbl[index] & IOAPIC_RO_BITS;
                if (s->ioregsel & 1) {
                    s->ioredtbl[index] &= 0xffffffff;
                    s->ioredtbl[index] |= (uint64_t)val << 32;
                } else {
                    s->ioredtbl[index] &= ~0xffffffffULL;
                    s->ioredtbl[index] |= val;
                }
                /* restore RO bits */
                s->ioredtbl[index] &= IOAPIC_RW_BITS;
                s->ioredtbl[index] |= ro_bits;
                ioapic_fix_edge_remote_irr(&s->ioredtbl[index]);
                ioapic_service(s);
            }
        }
        break;
    case IOAPIC_EOI:
        /* Explicit EOI is only supported for IOAPIC version 0x20 */
        if (size != 4 || s->version != 0x20) {
            break;
        }
        ioapic_eoi_broadcast(val);
        break;
    }

    ioapic_update_kvm_routes(s);
}

static const MemoryRegionOps ioapic_io_ops = {
    .read = ioapic_mem_read,
    .write = ioapic_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void ioapic_machine_done_notify(Notifier *notifier, void *data)
{
#ifdef CONFIG_KVM
    IOAPICCommonState *s = container_of(notifier, IOAPICCommonState,
                                        machine_done);

    if (kvm_irqchip_is_split()) {
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            /* Register this IOAPIC with the IOMMU IEC notifier, so that
             * we are notified of IR invalidations and can update the
             * kernel IR cache. */
            x86_iommu_iec_register_notifier(iommu, ioapic_iec_notifier, s);
        }
    }
#endif
}

#define IOAPIC_VER_DEF 0x20

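/*
 * Realize: validate the requested IOAPIC version, map the 4 KiB MMIO
 * region, create the delayed-service timer and the IOAPIC_NUM_PINS GPIO
 * inputs, and register a machine-done notifier so the KVM/IOMMU wiring
 * can be completed once the machine is fully built.
 */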
static void ioapic_realize(DeviceState *dev, Error **errp)
{
    IOAPICCommonState *s = IOAPIC_COMMON(dev);

    if (s->version != 0x11 && s->version != 0x20) {
        error_setg(errp, "IOAPIC only supports version 0x11 or 0x20 "
                   "(default: 0x%x).", IOAPIC_VER_DEF);
        return;
    }

    memory_region_init_io(&s->io_memory, OBJECT(s), &ioapic_io_ops, s,
                          "ioapic", 0x1000);

    s->delayed_ioapic_service_timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, delayed_ioapic_service_cb, s);

    qdev_init_gpio_in(dev, ioapic_set_irq, IOAPIC_NUM_PINS);

    ioapics[ioapic_no] = s;
    s->machine_done.notify = ioapic_machine_done_notify;
    qemu_add_machine_init_done_notifier(&s->machine_done);
}

static void ioapic_unrealize(DeviceState *dev, Error **errp)
{
    IOAPICCommonState *s = IOAPIC_COMMON(dev);

    timer_del(s->delayed_ioapic_service_timer);
    timer_free(s->delayed_ioapic_service_timer);
}

static Property ioapic_properties[] = {
    DEFINE_PROP_UINT8("version", IOAPICCommonState, version, IOAPIC_VER_DEF),
    DEFINE_PROP_END_OF_LIST(),
};

static void ioapic_class_init(ObjectClass *klass, void *data)
{
    IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    k->realize = ioapic_realize;
    k->unrealize = ioapic_unrealize;
    /*
     * If the APIC is in the kernel, we need to update the kernel routing
     * cache after migration, otherwise the first 24 GSI routes will be
     * invalid.
     */
    k->post_load = ioapic_update_kvm_routes;
    dc->reset = ioapic_reset_common;
    dc->props = ioapic_properties;
}

static const TypeInfo ioapic_info = {
    .name          = TYPE_IOAPIC,
    .parent        = TYPE_IOAPIC_COMMON,
    .instance_size = sizeof(IOAPICCommonState),
    .class_init    = ioapic_class_init,
};

static void ioapic_register_types(void)
{
    type_register_static(&ioapic_info);
}

type_init(ioapic_register_types)