xref: /qemu/hw/misc/aspeed_xdma.c (revision 09147930)
/*
 * ASPEED XDMA Controller
 * Eddie James <eajames@linux.ibm.com>
 *
 * Copyright (C) 2019 IBM Corp
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/irq.h"
#include "hw/misc/aspeed_xdma.h"
#include "migration/vmstate.h"
#include "qapi/error.h"

#include "trace.h"

#define XDMA_BMC_CMDQ_ADDR         0x10
#define XDMA_BMC_CMDQ_ENDP         0x14
#define XDMA_BMC_CMDQ_WRP          0x18
#define  XDMA_BMC_CMDQ_W_MASK      0x0003FFFF
#define XDMA_BMC_CMDQ_RDP          0x1C
#define  XDMA_BMC_CMDQ_RDP_MAGIC   0xEE882266
#define XDMA_IRQ_ENG_CTRL          0x20
#define  XDMA_IRQ_ENG_CTRL_US_COMP BIT(4)
#define  XDMA_IRQ_ENG_CTRL_DS_COMP BIT(5)
#define  XDMA_IRQ_ENG_CTRL_W_MASK  0xBFEFF07F
#define XDMA_IRQ_ENG_STAT          0x24
#define  XDMA_IRQ_ENG_STAT_US_COMP BIT(4)
#define  XDMA_IRQ_ENG_STAT_DS_COMP BIT(5)
#define  XDMA_IRQ_ENG_STAT_RESET   0xF8000000
#define XDMA_MEM_SIZE              0x1000

#define TO_REG(addr) ((addr) / sizeof(uint32_t))

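/*
 * Only the first ASPEED_XDMA_REG_SIZE bytes of the MMIO window are backed by
 * the regs[] array: reads outside that range return 0 and writes to it are
 * ignored.
 */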
static uint64_t aspeed_xdma_read(void *opaque, hwaddr addr, unsigned int size)
{
    uint32_t val = 0;
    AspeedXDMAState *xdma = opaque;

    if (addr < ASPEED_XDMA_REG_SIZE) {
        val = xdma->regs[TO_REG(addr)];
    }

    return (uint64_t)val;
}

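/*
 * Command queue model: a write to XDMA_BMC_CMDQ_WRP latches the new write
 * pointer, mirrors it into XDMA_BMC_CMDQ_RDP and immediately reports both
 * upstream and downstream completion in XDMA_IRQ_ENG_STAT, raising the IRQ
 * if either completion interrupt is enabled in XDMA_IRQ_ENG_CTRL.  Writing
 * XDMA_BMC_CMDQ_RDP_MAGIC to XDMA_BMC_CMDQ_RDP beforehand suppresses that
 * completion once, so a driver can update the read pointer without
 * triggering a spurious interrupt.
 *
 * An illustrative (not authoritative) guest sequence against this model,
 * assuming a Linux-style writel() helper; 'base' and the other variable
 * names are placeholders:
 *
 *     writel(cmdq_dma_addr, base + XDMA_BMC_CMDQ_ADDR);
 *     writel(cmdq_end, base + XDMA_BMC_CMDQ_ENDP);
 *     writel(new_wrp, base + XDMA_BMC_CMDQ_WRP);     // completion IRQ fires
 *     writel(XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP,
 *            base + XDMA_IRQ_ENG_STAT);              // ack, IRQ deasserted
 */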
static void aspeed_xdma_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned int size)
{
    unsigned int idx;
    uint32_t val32 = (uint32_t)val;
    AspeedXDMAState *xdma = opaque;

    if (addr >= ASPEED_XDMA_REG_SIZE) {
        return;
    }

    switch (addr) {
    case XDMA_BMC_CMDQ_ENDP:
        xdma->regs[TO_REG(addr)] = val32 & XDMA_BMC_CMDQ_W_MASK;
        break;
    case XDMA_BMC_CMDQ_WRP:
        idx = TO_REG(addr);
        xdma->regs[idx] = val32 & XDMA_BMC_CMDQ_W_MASK;
        xdma->regs[TO_REG(XDMA_BMC_CMDQ_RDP)] = xdma->regs[idx];

        trace_aspeed_xdma_write(addr, val);

        if (xdma->bmc_cmdq_readp_set) {
            xdma->bmc_cmdq_readp_set = 0;
        } else {
            xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] |=
                XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP;

            if (xdma->regs[TO_REG(XDMA_IRQ_ENG_CTRL)] &
                (XDMA_IRQ_ENG_CTRL_US_COMP | XDMA_IRQ_ENG_CTRL_DS_COMP)) {
                qemu_irq_raise(xdma->irq);
            }
        }
        break;
    case XDMA_BMC_CMDQ_RDP:
        trace_aspeed_xdma_write(addr, val);

        if (val32 == XDMA_BMC_CMDQ_RDP_MAGIC) {
            xdma->bmc_cmdq_readp_set = 1;
        }
        break;
    case XDMA_IRQ_ENG_CTRL:
        xdma->regs[TO_REG(addr)] = val32 & XDMA_IRQ_ENG_CTRL_W_MASK;
        break;
    case XDMA_IRQ_ENG_STAT:
        trace_aspeed_xdma_write(addr, val);

        idx = TO_REG(addr);
        if (val32 & (XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP)) {
            xdma->regs[idx] &=
                ~(XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP);
            qemu_irq_lower(xdma->irq);
        }
        break;
    default:
        xdma->regs[TO_REG(addr)] = val32;
        break;
    }
}

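/* Accesses to the XDMA register window are restricted to 32 bits. */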
static const MemoryRegionOps aspeed_xdma_ops = {
    .read = aspeed_xdma_read,
    .write = aspeed_xdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
};

static void aspeed_xdma_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    AspeedXDMAState *xdma = ASPEED_XDMA(dev);

    sysbus_init_irq(sbd, &xdma->irq);
    memory_region_init_io(&xdma->iomem, OBJECT(xdma), &aspeed_xdma_ops, xdma,
                          TYPE_ASPEED_XDMA, XDMA_MEM_SIZE);
    sysbus_init_mmio(sbd, &xdma->iomem);
}

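/*
 * Reset clears the register file, restores XDMA_IRQ_ENG_STAT to its reset
 * value (XDMA_IRQ_ENG_STAT_RESET) and deasserts the interrupt line.
 */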
static void aspeed_xdma_reset(DeviceState *dev)
{
    AspeedXDMAState *xdma = ASPEED_XDMA(dev);

    xdma->bmc_cmdq_readp_set = 0;
    memset(xdma->regs, 0, ASPEED_XDMA_REG_SIZE);
    xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] = XDMA_IRQ_ENG_STAT_RESET;

    qemu_irq_lower(xdma->irq);
}

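/* Migration covers only the register array; bmc_cmdq_readp_set is not saved. */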
static const VMStateDescription aspeed_xdma_vmstate = {
    .name = TYPE_ASPEED_XDMA,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedXDMAState, ASPEED_XDMA_NUM_REGS),
        VMSTATE_END_OF_LIST(),
    },
};

static void aspeed_xdma_class_init(ObjectClass *classp, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(classp);

    dc->realize = aspeed_xdma_realize;
    dc->reset = aspeed_xdma_reset;
    dc->vmsd = &aspeed_xdma_vmstate;
}

static const TypeInfo aspeed_xdma_info = {
    .name          = TYPE_ASPEED_XDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedXDMAState),
    .class_init    = aspeed_xdma_class_init,
};

static void aspeed_xdma_register_type(void)
{
    type_register_static(&aspeed_xdma_info);
}
type_init(aspeed_xdma_register_type);