1 /*
2 * Copyright (C) 2014-2016 Broadcom Corporation
3 * Copyright (c) 2017 Red Hat, Inc.
4 * Written by Prem Mallappa, Eric Auger
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include "hw/irq.h"
22 #include "hw/sysbus.h"
23 #include "migration/vmstate.h"
24 #include "hw/qdev-properties.h"
25 #include "hw/qdev-core.h"
26 #include "hw/pci/pci.h"
27 #include "cpu.h"
28 #include "trace.h"
29 #include "qemu/log.h"
30 #include "qemu/error-report.h"
31 #include "qapi/error.h"
32
33 #include "hw/arm/smmuv3.h"
34 #include "smmuv3-internal.h"
35 #include "smmu-internal.h"
36
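/*
 * Whether a translation fault is recorded as an event is controlled
 * per stage: CD.R for stage 1 (see decode_cd()) and STE.S2R for
 * stage 2 (see decode_ste_s2_cfg()).
 */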
37 #define PTW_RECORD_FAULT(cfg) (((cfg)->stage == 1) ? (cfg)->record_faults : \
38 (cfg)->s2cfg.record_faults)
39
40 /**
41 * smmuv3_trigger_irq - pulse @irq if enabled and update
42 * GERROR register in case of GERROR interrupt
43 *
44 * @irq: irq type
45 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
46 */
47 static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
48 uint32_t gerror_mask)
49 {
50
51 bool pulse = false;
52
53 switch (irq) {
54 case SMMU_IRQ_EVTQ:
55 pulse = smmuv3_eventq_irq_enabled(s);
56 break;
57 case SMMU_IRQ_PRIQ:
58 qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
59 break;
60 case SMMU_IRQ_CMD_SYNC:
61 pulse = true;
62 break;
63 case SMMU_IRQ_GERROR:
64 {
65 uint32_t pending = s->gerror ^ s->gerrorn;
66 uint32_t new_gerrors = ~pending & gerror_mask;
67
68 if (!new_gerrors) {
69             /* only toggle non-pending errors */
70 return;
71 }
72 s->gerror ^= new_gerrors;
73 trace_smmuv3_write_gerror(new_gerrors, s->gerror);
74
75 pulse = smmuv3_gerror_irq_enabled(s);
76 break;
77 }
78 }
79 if (pulse) {
80 trace_smmuv3_trigger_irq(irq);
81 qemu_irq_pulse(s->irq[irq]);
82 }
83 }
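/*
 * Worked example of the GERROR toggle protocol implemented above:
 * with gerror = 0b0100 and gerrorn = 0b0000, bit 2 is still pending
 * acknowledgement. A call with gerror_mask = 0b0110 only flips bit 1
 * (~pending & mask = 0b0010), so gerror becomes 0b0110; the pending
 * bit 2 stays untouched until the guest toggles it back through a
 * GERRORN write (see smmuv3_write_gerrorn() below).
 */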
84
85 static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
86 {
87 uint32_t pending = s->gerror ^ s->gerrorn;
88 uint32_t toggled = s->gerrorn ^ new_gerrorn;
89
90 if (toggled & ~pending) {
91 qemu_log_mask(LOG_GUEST_ERROR,
92 "guest toggles non pending errors = 0x%x\n",
93 toggled & ~pending);
94 }
95
96 /*
97      * We do not raise any error in case the guest toggles bits corresponding
98      * to IRQs which are not active (CONSTRAINED UNPREDICTABLE)
99 */
100 s->gerrorn = new_gerrorn;
101
102 trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
103 }
104
105 static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd)
106 {
107 dma_addr_t addr = Q_CONS_ENTRY(q);
108 MemTxResult ret;
109 int i;
110
111 ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(Cmd),
112 MEMTXATTRS_UNSPECIFIED);
113 if (ret != MEMTX_OK) {
114 return ret;
115 }
116 for (i = 0; i < ARRAY_SIZE(cmd->word); i++) {
117 le32_to_cpus(&cmd->word[i]);
118 }
119 return ret;
120 }
121
122 static MemTxResult queue_write(SMMUQueue *q, Evt *evt_in)
123 {
124 dma_addr_t addr = Q_PROD_ENTRY(q);
125 MemTxResult ret;
126 Evt evt = *evt_in;
127 int i;
128
129 for (i = 0; i < ARRAY_SIZE(evt.word); i++) {
130 cpu_to_le32s(&evt.word[i]);
131 }
132 ret = dma_memory_write(&address_space_memory, addr, &evt, sizeof(Evt),
133 MEMTXATTRS_UNSPECIFIED);
134 if (ret != MEMTX_OK) {
135 return ret;
136 }
137
138 queue_prod_incr(q);
139 return MEMTX_OK;
140 }
141
142 static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
143 {
144 SMMUQueue *q = &s->eventq;
145 MemTxResult r;
146
147 if (!smmuv3_eventq_enabled(s)) {
148 return MEMTX_ERROR;
149 }
150
151 if (smmuv3_q_full(q)) {
152 return MEMTX_ERROR;
153 }
154
155 r = queue_write(q, evt);
156 if (r != MEMTX_OK) {
157 return r;
158 }
159
160 if (!smmuv3_q_empty(q)) {
161 smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
162 }
163 return MEMTX_OK;
164 }
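/*
 * Note that a full queue and a failed DMA write are reported alike:
 * smmuv3_record_event() below raises GERROR.EVENTQ_ABT_ERR whenever
 * smmuv3_write_eventq() returns something other than MEMTX_OK.
 */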
165
166 void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
167 {
168 Evt evt = {};
169 MemTxResult r;
170
171 if (!smmuv3_eventq_enabled(s)) {
172 return;
173 }
174
175 EVT_SET_TYPE(&evt, info->type);
176 EVT_SET_SID(&evt, info->sid);
177
178 switch (info->type) {
179 case SMMU_EVT_NONE:
180 return;
181 case SMMU_EVT_F_UUT:
182 EVT_SET_SSID(&evt, info->u.f_uut.ssid);
183 EVT_SET_SSV(&evt, info->u.f_uut.ssv);
184 EVT_SET_ADDR(&evt, info->u.f_uut.addr);
185 EVT_SET_RNW(&evt, info->u.f_uut.rnw);
186 EVT_SET_PNU(&evt, info->u.f_uut.pnu);
187 EVT_SET_IND(&evt, info->u.f_uut.ind);
188 break;
189 case SMMU_EVT_C_BAD_STREAMID:
190 EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
191 EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
192 break;
193 case SMMU_EVT_F_STE_FETCH:
194 EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
195 EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
196 EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
197 break;
198 case SMMU_EVT_C_BAD_STE:
199 EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
200 EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
201 break;
202 case SMMU_EVT_F_STREAM_DISABLED:
203 break;
204 case SMMU_EVT_F_TRANS_FORBIDDEN:
205 EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
206 EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
207 break;
208 case SMMU_EVT_C_BAD_SUBSTREAMID:
209 EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
210 break;
211 case SMMU_EVT_F_CD_FETCH:
212 EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
213 EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
214 EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
215 break;
216 case SMMU_EVT_C_BAD_CD:
217 EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
218 EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
219 break;
220 case SMMU_EVT_F_WALK_EABT:
221 case SMMU_EVT_F_TRANSLATION:
222 case SMMU_EVT_F_ADDR_SIZE:
223 case SMMU_EVT_F_ACCESS:
224 case SMMU_EVT_F_PERMISSION:
225 EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
226 EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
227 EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
228 EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
229 EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
230 EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
231 EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
232 EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
233 EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
234 EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
235 EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
236 break;
237 case SMMU_EVT_F_CFG_CONFLICT:
238 EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
239 EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
240 break;
241 /* rest is not implemented */
242 case SMMU_EVT_F_BAD_ATS_TREQ:
243 case SMMU_EVT_F_TLB_CONFLICT:
244 case SMMU_EVT_E_PAGE_REQ:
245 default:
246 g_assert_not_reached();
247 }
248
249 trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
250 r = smmuv3_write_eventq(s, &evt);
251 if (r != MEMTX_OK) {
252 smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
253 }
254 info->recorded = true;
255 }
256
257 static void smmuv3_init_regs(SMMUv3State *s)
258 {
259     /* Based on the "stage" system property, advertise the supported stages. */
260 if (s->stage && !strcmp("2", s->stage)) {
261 s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1);
262 } else {
263 s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1);
264 }
265
266 s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
267 s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
268 s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
269 s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */
270 s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
271 s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
272 /* terminated transaction will always be aborted/error returned */
273 s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
274 /* 2-level stream table supported */
275 s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);
276
277 s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
278 s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
279 s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);
280
281 s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
282 if (FIELD_EX32(s->idr[0], IDR0, S2P)) {
283 /* XNX is a stage-2-specific feature */
284 s->idr[3] = FIELD_DP32(s->idr[3], IDR3, XNX, 1);
285 }
286 s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
287 s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);
288
289 s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
290 /* 4K, 16K and 64K granule support */
291 s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
292 s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
293 s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
294
295 s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
296 s->cmdq.prod = 0;
297 s->cmdq.cons = 0;
298 s->cmdq.entry_size = sizeof(struct Cmd);
299 s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
300 s->eventq.prod = 0;
301 s->eventq.cons = 0;
302 s->eventq.entry_size = sizeof(struct Evt);
303
304 s->features = 0;
305 s->sid_split = 0;
306 s->aidr = 0x1;
307 s->cr[0] = 0;
308 s->cr0ack = 0;
309 s->irq_ctrl = 0;
310 s->gerror = 0;
311 s->gerrorn = 0;
312 s->statusr = 0;
313 s->gbpa = SMMU_GBPA_RESET_VAL;
314 }
315
316 static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
317 SMMUEventInfo *event)
318 {
319 int ret, i;
320
321 trace_smmuv3_get_ste(addr);
322 /* TODO: guarantee 64-bit single-copy atomicity */
323 ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
324 MEMTXATTRS_UNSPECIFIED);
325 if (ret != MEMTX_OK) {
326 qemu_log_mask(LOG_GUEST_ERROR,
327 "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
328 event->type = SMMU_EVT_F_STE_FETCH;
329 event->u.f_ste_fetch.addr = addr;
330 return -EINVAL;
331 }
332 for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
333 le32_to_cpus(&buf->word[i]);
334 }
335 return 0;
336
337 }
338
339 /* @ssid > 0 not supported yet */
340 static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
341 CD *buf, SMMUEventInfo *event)
342 {
343 dma_addr_t addr = STE_CTXPTR(ste);
344 int ret, i;
345
346 trace_smmuv3_get_cd(addr);
347 /* TODO: guarantee 64-bit single-copy atomicity */
348 ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
349 MEMTXATTRS_UNSPECIFIED);
350 if (ret != MEMTX_OK) {
351 qemu_log_mask(LOG_GUEST_ERROR,
352 "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
353 event->type = SMMU_EVT_F_CD_FETCH;
354         event->u.f_cd_fetch.addr = addr;
355 return -EINVAL;
356 }
357 for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
358 le32_to_cpus(&buf->word[i]);
359 }
360 return 0;
361 }
362
363 /*
364 * Max valid value is 39 when SMMU_IDR3.STT == 0.
365 * In architectures after SMMUv3.0:
366 * - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for this
367 * field is MAX(16, 64-IAS)
368 * - If STE.S2TG selects a 64KB granule, the minimum valid value for this field
369 * is (64-IAS).
370 * As we only support AA64, IAS = OAS.
371 */
372 static bool s2t0sz_valid(SMMUTransCfg *cfg)
373 {
374 if (cfg->s2cfg.tsz > 39) {
375 return false;
376 }
377
378 if (cfg->s2cfg.granule_sz == 16) {
379 return (cfg->s2cfg.tsz >= 64 - oas2bits(SMMU_IDR5_OAS));
380 }
381
382 return (cfg->s2cfg.tsz >= MAX(64 - oas2bits(SMMU_IDR5_OAS), 16));
383 }
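/*
 * Numeric example: with the 44-bit OAS advertised in IDR5
 * (SMMU_IDR5_OAS), 64 - oas2bits(SMMU_IDR5_OAS) = 20, so the valid
 * S2T0SZ range is [20, 39] for all three granule sizes.
 */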
384
385 /*
386 * Return true if s2 page table config is valid.
387 * This checks with the configured start level, ias_bits and granularity we can
388 * have a valid page table as described in ARM ARM D8.2 Translation process.
389 * The idea here is to see for the highest possible number of IPA bits, how
390 * many concatenated tables we would need, if it is more than 16, then this is
391 * not possible.
392 */
393 static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran)
394 {
395 int level = get_start_level(sl0, gran);
396 uint64_t ipa_bits = 64 - t0sz;
397 uint64_t max_ipa = (1ULL << ipa_bits) - 1;
398 int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1;
399
400 return nr_concat <= VMSA_MAX_S2_CONCAT;
401 }
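/*
 * Example of the arithmetic above (a sketch, assuming the usual VMSA
 * level geometry behind get_start_level()/pgd_concat_idx()): with a
 * 4KB granule, SL0 = 1 selects start level 1, where a single table
 * maps 2^39 bytes. T0SZ = 24 gives 40 IPA bits, so pgd_concat_idx()
 * yields 1 and two concatenated level-1 tables are needed -- well
 * within VMSA_MAX_S2_CONCAT (16).
 */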
402
403 static int decode_ste_s2_cfg(SMMUTransCfg *cfg, STE *ste)
404 {
405 cfg->stage = 2;
406
407 if (STE_S2AA64(ste) == 0x0) {
408 qemu_log_mask(LOG_UNIMP,
409 "SMMUv3 AArch32 tables not supported\n");
410 g_assert_not_reached();
411 }
412
413 switch (STE_S2TG(ste)) {
414 case 0x0: /* 4KB */
415 cfg->s2cfg.granule_sz = 12;
416 break;
417 case 0x1: /* 64KB */
418 cfg->s2cfg.granule_sz = 16;
419 break;
420 case 0x2: /* 16KB */
421 cfg->s2cfg.granule_sz = 14;
422 break;
423 default:
424 qemu_log_mask(LOG_GUEST_ERROR,
425 "SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste));
426 goto bad_ste;
427 }
428
429 cfg->s2cfg.vttb = STE_S2TTB(ste);
430
431 cfg->s2cfg.sl0 = STE_S2SL0(ste);
432 /* FEAT_TTST not supported. */
433 if (cfg->s2cfg.sl0 == 0x3) {
434 qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n");
435 goto bad_ste;
436 }
437
438     /* For AA64, the effective S2PS size is capped to the OAS. */
439 cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), SMMU_IDR5_OAS));
440 /*
441 * It is ILLEGAL for the address in S2TTB to be outside the range
442 * described by the effective S2PS value.
443 */
444 if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) {
445 qemu_log_mask(LOG_GUEST_ERROR,
446 "SMMUv3 S2TTB too large 0x%" PRIx64
447 ", effective PS %d bits\n",
448 cfg->s2cfg.vttb, cfg->s2cfg.eff_ps);
449 goto bad_ste;
450 }
451
452 cfg->s2cfg.tsz = STE_S2T0SZ(ste);
453
454 if (!s2t0sz_valid(cfg)) {
455 qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n",
456 cfg->s2cfg.tsz);
457 goto bad_ste;
458 }
459
460 if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz,
461 cfg->s2cfg.granule_sz)) {
462 qemu_log_mask(LOG_GUEST_ERROR,
463 "SMMUv3 STE stage 2 config not valid!\n");
464 goto bad_ste;
465 }
466
467     /* Only LE supported (IDR0.TTENDIAN). */
468 if (STE_S2ENDI(ste)) {
469 qemu_log_mask(LOG_GUEST_ERROR,
470 "SMMUv3 STE_S2ENDI only supports LE!\n");
471 goto bad_ste;
472 }
473
474 cfg->s2cfg.affd = STE_S2AFFD(ste);
475
476 cfg->s2cfg.record_faults = STE_S2R(ste);
477     /* Stall is not supported. */
478 if (STE_S2S(ste)) {
479 qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n");
480 goto bad_ste;
481 }
482
483 return 0;
484
485 bad_ste:
486 return -EINVAL;
487 }
488
489 /* Returns < 0 in case of invalid STE, 0 otherwise */
490 static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
491 STE *ste, SMMUEventInfo *event)
492 {
493 uint32_t config;
494 int ret;
495
496 if (!STE_VALID(ste)) {
497 if (!event->inval_ste_allowed) {
498 qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
499 }
500 goto bad_ste;
501 }
502
503 config = STE_CONFIG(ste);
504
505 if (STE_CFG_ABORT(config)) {
506 cfg->aborted = true;
507 return 0;
508 }
509
510 if (STE_CFG_BYPASS(config)) {
511 cfg->bypassed = true;
512 return 0;
513 }
514
515 /*
516      * If a stage is enabled in SW while not advertised, raise C_BAD_STE
517      * as per the user manual (ARM IHI 0070E), "5.2 Stream Table Entry".
518 */
519 if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) {
520 qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n");
521 goto bad_ste;
522 }
523 if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) {
524 qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n");
525 goto bad_ste;
526 }
527
528 if (STAGE2_SUPPORTED(s)) {
529 /* VMID is considered even if s2 is disabled. */
530 cfg->s2cfg.vmid = STE_S2VMID(ste);
531 } else {
532 /* Default to -1 */
533 cfg->s2cfg.vmid = -1;
534 }
535
536 if (STE_CFG_S2_ENABLED(config)) {
537 /*
538          * The stage-1 OAS defaults to the IDR5 OAS even if stage 1 is not
539          * enabled, as it is used in the input address check for stage 2.
540 */
541 cfg->oas = oas2bits(SMMU_IDR5_OAS);
542 ret = decode_ste_s2_cfg(cfg, ste);
543 if (ret) {
544 goto bad_ste;
545 }
546 }
547
548 if (STE_S1CDMAX(ste) != 0) {
549 qemu_log_mask(LOG_UNIMP,
550 "SMMUv3 does not support multiple context descriptors yet\n");
551 goto bad_ste;
552 }
553
554 if (STE_S1STALLD(ste)) {
555 qemu_log_mask(LOG_UNIMP,
556 "SMMUv3 S1 stalling fault model not allowed yet\n");
557 goto bad_ste;
558 }
559 return 0;
560
561 bad_ste:
562 event->type = SMMU_EVT_C_BAD_STE;
563 return -EINVAL;
564 }
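/*
 * Note the precedence implemented above: an abort configuration wins
 * over bypass, and both short-circuit the stage checks, so
 * smmuv3_decode_config() never fetches a CD for aborted or bypassed
 * streams.
 */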
565
566 /**
567 * smmu_find_ste - Return the stream table entry associated
568 * to the sid
569 *
570 * @s: smmuv3 handle
571 * @sid: stream ID
572 * @ste: returned stream table entry
573 * @event: handle to an event info
574 *
575 * Supports linear and 2-level stream table
576 * Return 0 on success, -EINVAL otherwise
577 */
578 static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
579 SMMUEventInfo *event)
580 {
581 dma_addr_t addr, strtab_base;
582 uint32_t log2size;
583 int strtab_size_shift;
584 int ret;
585
586 trace_smmuv3_find_ste(sid, s->features, s->sid_split);
587 log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
588 /*
589 * Check SID range against both guest-configured and implementation limits
590 */
591 if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
592 event->type = SMMU_EVT_C_BAD_STREAMID;
593 return -EINVAL;
594 }
595 if (s->features & SMMU_FEATURE_2LVL_STE) {
596 int l1_ste_offset, l2_ste_offset, max_l2_ste, span, i;
597 dma_addr_t l1ptr, l2ptr;
598 STEDesc l1std;
599
600 /*
601 * Align strtab base address to table size. For this purpose, assume it
602 * is not bounded by SMMU_IDR1_SIDSIZE.
603 */
604 strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
605 strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
606 ~MAKE_64BIT_MASK(0, strtab_size_shift);
607 l1_ste_offset = sid >> s->sid_split;
608 l2_ste_offset = sid & ((1 << s->sid_split) - 1);
609 l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
610 /* TODO: guarantee 64-bit single-copy atomicity */
611 ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
612 sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
613 if (ret != MEMTX_OK) {
614 qemu_log_mask(LOG_GUEST_ERROR,
615 "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
616 event->type = SMMU_EVT_F_STE_FETCH;
617 event->u.f_ste_fetch.addr = l1ptr;
618 return -EINVAL;
619 }
620 for (i = 0; i < ARRAY_SIZE(l1std.word); i++) {
621 le32_to_cpus(&l1std.word[i]);
622 }
623
624 span = L1STD_SPAN(&l1std);
625
626 if (!span) {
627 /* l2ptr is not valid */
628 if (!event->inval_ste_allowed) {
629 qemu_log_mask(LOG_GUEST_ERROR,
630 "invalid sid=%d (L1STD span=0)\n", sid);
631 }
632 event->type = SMMU_EVT_C_BAD_STREAMID;
633 return -EINVAL;
634 }
635 max_l2_ste = (1 << span) - 1;
636 l2ptr = l1std_l2ptr(&l1std);
637 trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
638 l2ptr, l2_ste_offset, max_l2_ste);
639 if (l2_ste_offset > max_l2_ste) {
640 qemu_log_mask(LOG_GUEST_ERROR,
641 "l2_ste_offset=%d > max_l2_ste=%d\n",
642 l2_ste_offset, max_l2_ste);
643 event->type = SMMU_EVT_C_BAD_STE;
644 return -EINVAL;
645 }
646 addr = l2ptr + l2_ste_offset * sizeof(*ste);
647 } else {
648 strtab_size_shift = log2size + 5;
649 strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
650 ~MAKE_64BIT_MASK(0, strtab_size_shift);
651 addr = strtab_base + sid * sizeof(*ste);
652 }
653
654 if (smmu_get_ste(s, addr, ste, event)) {
655 return -EINVAL;
656 }
657
658 return 0;
659 }
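/*
 * Indexing example for the 2-level table: with sid_split = 8,
 * sid = 0x1234 is split into l1_ste_offset = 0x12 (index into the
 * level-1 descriptor array) and l2_ste_offset = 0x34 (index into the
 * level-2 STE array pointed to by that descriptor).
 */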
660
661 static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
662 {
663 int ret = -EINVAL;
664 int i;
665
666 if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
667 goto bad_cd;
668 }
669 if (!CD_A(cd)) {
670 goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
671 }
672 if (CD_S(cd)) {
673 goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
674 }
675 if (CD_HA(cd) || CD_HD(cd)) {
676 goto bad_cd; /* HTTU = 0 */
677 }
678
679 /* we support only those at the moment */
680 cfg->aa64 = true;
681 cfg->stage = 1;
682
683 cfg->oas = oas2bits(CD_IPS(cd));
684 cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
685 cfg->tbi = CD_TBI(cd);
686 cfg->asid = CD_ASID(cd);
687 cfg->affd = CD_AFFD(cd);
688
689 trace_smmuv3_decode_cd(cfg->oas);
690
691 /* decode data dependent on TT */
692 for (i = 0; i <= 1; i++) {
693 int tg, tsz;
694 SMMUTransTableInfo *tt = &cfg->tt[i];
695
696 cfg->tt[i].disabled = CD_EPD(cd, i);
697 if (cfg->tt[i].disabled) {
698 continue;
699 }
700
701 tsz = CD_TSZ(cd, i);
702 if (tsz < 16 || tsz > 39) {
703 goto bad_cd;
704 }
705
706 tg = CD_TG(cd, i);
707 tt->granule_sz = tg2granule(tg, i);
708 if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
709 tt->granule_sz != 16) || CD_ENDI(cd)) {
710 goto bad_cd;
711 }
712
713 tt->tsz = tsz;
714 tt->ttb = CD_TTB(cd, i);
715 if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
716 goto bad_cd;
717 }
718 tt->had = CD_HAD(cd, i);
719 trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
720 }
721
722 cfg->record_faults = CD_R(cd);
723
724 return 0;
725
726 bad_cd:
727 event->type = SMMU_EVT_C_BAD_CD;
728 return ret;
729 }
730
731 /**
732 * smmuv3_decode_config - Prepare the translation configuration
733 * for the @mr iommu region
734 * @mr: iommu memory region the translation config must be prepared for
735 * @cfg: output translation configuration which is populated through
736 * the different configuration decoding steps
737 * @event: must be zero'ed by the caller
738 *
739  * Return < 0 in case of config decoding error (@event is filled
740 * accordingly). Return 0 otherwise.
741 */
742 static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
743 SMMUEventInfo *event)
744 {
745 SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
746 uint32_t sid = smmu_get_sid(sdev);
747 SMMUv3State *s = sdev->smmu;
748 int ret;
749 STE ste;
750 CD cd;
751
752 /* ASID defaults to -1 (if s1 is not supported). */
753 cfg->asid = -1;
754
755 ret = smmu_find_ste(s, sid, &ste, event);
756 if (ret) {
757 return ret;
758 }
759
760 ret = decode_ste(s, cfg, &ste, event);
761 if (ret) {
762 return ret;
763 }
764
765 if (cfg->aborted || cfg->bypassed || (cfg->stage == 2)) {
766 return 0;
767 }
768
769 ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
770 if (ret) {
771 return ret;
772 }
773
774 return decode_cd(cfg, &cd, event);
775 }
776
777 /**
778  * smmuv3_get_config - Look up a cached copy of the configuration data for
779  * @sdev; on a cache miss, decode the configuration structures from
780  * guest RAM.
781 *
782 * @sdev: SMMUDevice handle
783 * @event: output event info
784 *
785 * The configuration cache contains data resulting from both STE and CD
786 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
787 * by the SMMUDevice handle.
788 */
789 static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
790 {
791 SMMUv3State *s = sdev->smmu;
792 SMMUState *bc = &s->smmu_state;
793 SMMUTransCfg *cfg;
794
795 cfg = g_hash_table_lookup(bc->configs, sdev);
796 if (cfg) {
797 sdev->cfg_cache_hits++;
798 trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
799 sdev->cfg_cache_hits, sdev->cfg_cache_misses,
800 100 * sdev->cfg_cache_hits /
801 (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
802 } else {
803 sdev->cfg_cache_misses++;
804 trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
805 sdev->cfg_cache_hits, sdev->cfg_cache_misses,
806 100 * sdev->cfg_cache_hits /
807 (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
808 cfg = g_new0(SMMUTransCfg, 1);
809
810 if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
811 g_hash_table_insert(bc->configs, sdev, cfg);
812 } else {
813 g_free(cfg);
814 cfg = NULL;
815 }
816 }
817 return cfg;
818 }
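/*
 * Cached entries are dropped again by smmuv3_flush_config(), driven
 * by the guest's CFGI_STE/CFGI_CD family of commands (see
 * smmuv3_cmdq_consume()), so a properly advertised STE/CD update
 * never leaves a stale config behind.
 */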
819
820 static void smmuv3_flush_config(SMMUDevice *sdev)
821 {
822 SMMUv3State *s = sdev->smmu;
823 SMMUState *bc = &s->smmu_state;
824
825 trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
826 g_hash_table_remove(bc->configs, sdev);
827 }
828
829 static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
830 IOMMUAccessFlags flag, int iommu_idx)
831 {
832 SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
833 SMMUv3State *s = sdev->smmu;
834 uint32_t sid = smmu_get_sid(sdev);
835 SMMUEventInfo event = {.type = SMMU_EVT_NONE,
836 .sid = sid,
837 .inval_ste_allowed = false};
838 SMMUPTWEventInfo ptw_info = {};
839 SMMUTranslationStatus status;
840 SMMUState *bs = ARM_SMMU(s);
841 uint64_t page_mask, aligned_addr;
842 SMMUTLBEntry *cached_entry = NULL;
843 SMMUTransTableInfo *tt;
844 SMMUTransCfg *cfg = NULL;
845 IOMMUTLBEntry entry = {
846 .target_as = &address_space_memory,
847 .iova = addr,
848 .translated_addr = addr,
849 .addr_mask = ~(hwaddr)0,
850 .perm = IOMMU_NONE,
851 };
852     /*
853      * Combined attributes used for TLB lookup. As only one stage is
854      * supported, this holds the attributes of whichever stage is enabled.
855      */
856 SMMUTransTableInfo tt_combined;
857
858 qemu_mutex_lock(&s->mutex);
859
860 if (!smmu_enabled(s)) {
861 if (FIELD_EX32(s->gbpa, GBPA, ABORT)) {
862 status = SMMU_TRANS_ABORT;
863 } else {
864 status = SMMU_TRANS_DISABLE;
865 }
866 goto epilogue;
867 }
868
869 cfg = smmuv3_get_config(sdev, &event);
870 if (!cfg) {
871 status = SMMU_TRANS_ERROR;
872 goto epilogue;
873 }
874
875 if (cfg->aborted) {
876 status = SMMU_TRANS_ABORT;
877 goto epilogue;
878 }
879
880 if (cfg->bypassed) {
881 status = SMMU_TRANS_BYPASS;
882 goto epilogue;
883 }
884
885 if (cfg->stage == 1) {
886 /* Select stage1 translation table. */
887 tt = select_tt(cfg, addr);
888 if (!tt) {
889 if (cfg->record_faults) {
890 event.type = SMMU_EVT_F_TRANSLATION;
891 event.u.f_translation.addr = addr;
892 event.u.f_translation.rnw = flag & 0x1;
893 }
894 status = SMMU_TRANS_ERROR;
895 goto epilogue;
896 }
897 tt_combined.granule_sz = tt->granule_sz;
898 tt_combined.tsz = tt->tsz;
899
900 } else {
901 /* Stage2. */
902 tt_combined.granule_sz = cfg->s2cfg.granule_sz;
903 tt_combined.tsz = cfg->s2cfg.tsz;
904 }
905     /*
906      * The TLB lookup needs the granule and input size of the translation
907      * stage; as only one stage is supported right now, pick the right
908      * values from the configuration.
909      */
910 page_mask = (1ULL << tt_combined.granule_sz) - 1;
911 aligned_addr = addr & ~page_mask;
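    /*
     * e.g. with a 4KB granule (granule_sz = 12), page_mask = 0xfff and
     * an input address of 0x40001234 is aligned down to 0x40001000.
     */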
912
913 cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, aligned_addr);
914 if (cached_entry) {
915 if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
916 status = SMMU_TRANS_ERROR;
917 /*
918 * We know that the TLB only contains either stage-1 or stage-2 as
919 * nesting is not supported. So it is sufficient to check the
920 * translation stage to know the TLB stage for now.
921 */
922 event.u.f_walk_eabt.s2 = (cfg->stage == 2);
923 if (PTW_RECORD_FAULT(cfg)) {
924 event.type = SMMU_EVT_F_PERMISSION;
925 event.u.f_permission.addr = addr;
926 event.u.f_permission.rnw = flag & 0x1;
927 }
928 } else {
929 status = SMMU_TRANS_SUCCESS;
930 }
931 goto epilogue;
932 }
933
934 cached_entry = g_new0(SMMUTLBEntry, 1);
935
936 if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
937         /* All faults from the PTW have an S2 field. */
938 event.u.f_walk_eabt.s2 = (ptw_info.stage == 2);
939 g_free(cached_entry);
940 switch (ptw_info.type) {
941 case SMMU_PTW_ERR_WALK_EABT:
942 event.type = SMMU_EVT_F_WALK_EABT;
943 event.u.f_walk_eabt.addr = addr;
944 event.u.f_walk_eabt.rnw = flag & 0x1;
945 event.u.f_walk_eabt.class = 0x1;
946 event.u.f_walk_eabt.addr2 = ptw_info.addr;
947 break;
948 case SMMU_PTW_ERR_TRANSLATION:
949 if (PTW_RECORD_FAULT(cfg)) {
950 event.type = SMMU_EVT_F_TRANSLATION;
951 event.u.f_translation.addr = addr;
952 event.u.f_translation.rnw = flag & 0x1;
953 }
954 break;
955 case SMMU_PTW_ERR_ADDR_SIZE:
956 if (PTW_RECORD_FAULT(cfg)) {
957 event.type = SMMU_EVT_F_ADDR_SIZE;
958 event.u.f_addr_size.addr = addr;
959 event.u.f_addr_size.rnw = flag & 0x1;
960 }
961 break;
962 case SMMU_PTW_ERR_ACCESS:
963 if (PTW_RECORD_FAULT(cfg)) {
964 event.type = SMMU_EVT_F_ACCESS;
965 event.u.f_access.addr = addr;
966 event.u.f_access.rnw = flag & 0x1;
967 }
968 break;
969 case SMMU_PTW_ERR_PERMISSION:
970 if (PTW_RECORD_FAULT(cfg)) {
971 event.type = SMMU_EVT_F_PERMISSION;
972 event.u.f_permission.addr = addr;
973 event.u.f_permission.rnw = flag & 0x1;
974 }
975 break;
976 default:
977 g_assert_not_reached();
978 }
979 status = SMMU_TRANS_ERROR;
980 } else {
981 smmu_iotlb_insert(bs, cfg, cached_entry);
982 status = SMMU_TRANS_SUCCESS;
983 }
984
985 epilogue:
986 qemu_mutex_unlock(&s->mutex);
987 switch (status) {
988 case SMMU_TRANS_SUCCESS:
989 entry.perm = cached_entry->entry.perm;
990 entry.translated_addr = cached_entry->entry.translated_addr +
991 (addr & cached_entry->entry.addr_mask);
992 entry.addr_mask = cached_entry->entry.addr_mask;
993 trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
994 entry.translated_addr, entry.perm);
995 break;
996 case SMMU_TRANS_DISABLE:
997 entry.perm = flag;
998 entry.addr_mask = ~TARGET_PAGE_MASK;
999 trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
1000 entry.perm);
1001 break;
1002 case SMMU_TRANS_BYPASS:
1003 entry.perm = flag;
1004 entry.addr_mask = ~TARGET_PAGE_MASK;
1005 trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
1006 entry.perm);
1007 break;
1008 case SMMU_TRANS_ABORT:
1009 /* no event is recorded on abort */
1010 trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
1011 entry.perm);
1012 break;
1013 case SMMU_TRANS_ERROR:
1014 qemu_log_mask(LOG_GUEST_ERROR,
1015 "%s translation failed for iova=0x%"PRIx64" (%s)\n",
1016 mr->parent_obj.name, addr, smmu_event_string(event.type));
1017 smmuv3_record_event(s, &event);
1018 break;
1019 }
1020
1021 return entry;
1022 }
1023
1024 /**
1025 * smmuv3_notify_iova - call the notifier @n for a given
1026 * @asid and @iova tuple.
1027 *
1028 * @mr: IOMMU mr region handle
1029 * @n: notifier to be called
1030 * @asid: address space ID or negative value if we don't care
1031 * @vmid: virtual machine ID or negative value if we don't care
1032 * @iova: iova
1033 * @tg: translation granule (if communicated through range invalidation)
1034 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
1035 */
1036 static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
1037 IOMMUNotifier *n,
1038 int asid, int vmid,
1039 dma_addr_t iova, uint8_t tg,
1040 uint64_t num_pages)
1041 {
1042 SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
1043 IOMMUTLBEvent event;
1044 uint8_t granule;
1045 SMMUv3State *s = sdev->smmu;
1046
1047 if (!tg) {
1048 SMMUEventInfo eventinfo = {.inval_ste_allowed = true};
1049 SMMUTransCfg *cfg = smmuv3_get_config(sdev, &eventinfo);
1050 SMMUTransTableInfo *tt;
1051
1052 if (!cfg) {
1053 return;
1054 }
1055
1056 if (asid >= 0 && cfg->asid != asid) {
1057 return;
1058 }
1059
1060 if (vmid >= 0 && cfg->s2cfg.vmid != vmid) {
1061 return;
1062 }
1063
1064 if (STAGE1_SUPPORTED(s)) {
1065 tt = select_tt(cfg, iova);
1066 if (!tt) {
1067 return;
1068 }
1069 granule = tt->granule_sz;
1070 } else {
1071 granule = cfg->s2cfg.granule_sz;
1072 }
1073
1074 } else {
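        /*
         * TG encodes the leaf granule: 1 -> 4KB (12 bits),
         * 2 -> 16KB (14 bits), 3 -> 64KB (16 bits).
         */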
1075 granule = tg * 2 + 10;
1076 }
1077
1078 event.type = IOMMU_NOTIFIER_UNMAP;
1079 event.entry.target_as = &address_space_memory;
1080 event.entry.iova = iova;
1081 event.entry.addr_mask = num_pages * (1 << granule) - 1;
1082 event.entry.perm = IOMMU_NONE;
1083
1084 memory_region_notify_iommu_one(n, &event);
1085 }
1086
1087 /* invalidate an asid/vmid/iova range tuple in all mr's */
1088 static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
1089 dma_addr_t iova, uint8_t tg,
1090 uint64_t num_pages)
1091 {
1092 SMMUDevice *sdev;
1093
1094 QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
1095 IOMMUMemoryRegion *mr = &sdev->iommu;
1096 IOMMUNotifier *n;
1097
1098 trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid,
1099 iova, tg, num_pages);
1100
1101 IOMMU_NOTIFIER_FOREACH(n, mr) {
1102 smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages);
1103 }
1104 }
1105 }
1106
1107 static void smmuv3_range_inval(SMMUState *s, Cmd *cmd)
1108 {
1109 dma_addr_t end, addr = CMD_ADDR(cmd);
1110 uint8_t type = CMD_TYPE(cmd);
1111 int vmid = -1;
1112 uint8_t scale = CMD_SCALE(cmd);
1113 uint8_t num = CMD_NUM(cmd);
1114 uint8_t ttl = CMD_TTL(cmd);
1115 bool leaf = CMD_LEAF(cmd);
1116 uint8_t tg = CMD_TG(cmd);
1117 uint64_t num_pages;
1118 uint8_t granule;
1119 int asid = -1;
1120 SMMUv3State *smmuv3 = ARM_SMMUV3(s);
1121
1122 /* Only consider VMID if stage-2 is supported. */
1123 if (STAGE2_SUPPORTED(smmuv3)) {
1124 vmid = CMD_VMID(cmd);
1125 }
1126
1127 if (type == SMMU_CMD_TLBI_NH_VA) {
1128 asid = CMD_ASID(cmd);
1129 }
1130
1131 if (!tg) {
1132 trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
1133 smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1);
1134 smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
1135 return;
1136 }
1137
1138 /* RIL in use */
1139
1140 num_pages = (num + 1) * BIT_ULL(scale);
1141 granule = tg * 2 + 10;
1142
1143     /* Split invalidations into power-of-2 range invalidations */
1144 end = addr + (num_pages << granule) - 1;
1145
1146 while (addr != end + 1) {
1147 uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
1148
1149 num_pages = (mask + 1) >> granule;
1150 trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
1151 smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages);
1152 smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
1153 addr += mask + 1;
1154 }
1155 }
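/*
 * Worked example of the loop above (a sketch, assuming
 * dma_aligned_pow2_mask() returns the largest power-of-two span
 * compatible with the address alignment and the remaining range):
 * tg = 1 (4KB), scale = 0, num = 2 gives num_pages = 3. For
 * addr = 0x5000 the range [0x5000, 0x7fff] is split into a 1-page
 * invalidation at 0x5000 and a 2-page invalidation at 0x6000.
 */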
1156
1157 static gboolean
1158 smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
1159 {
1160 SMMUDevice *sdev = (SMMUDevice *)key;
1161 uint32_t sid = smmu_get_sid(sdev);
1162 SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;
1163
1164 if (sid < sid_range->start || sid > sid_range->end) {
1165 return false;
1166 }
1167 trace_smmuv3_config_cache_inv(sid);
1168 return true;
1169 }
1170
1171 static int smmuv3_cmdq_consume(SMMUv3State *s)
1172 {
1173 SMMUState *bs = ARM_SMMU(s);
1174 SMMUCmdError cmd_error = SMMU_CERROR_NONE;
1175 SMMUQueue *q = &s->cmdq;
1176 SMMUCommandType type = 0;
1177
1178 if (!smmuv3_cmdq_enabled(s)) {
1179 return 0;
1180 }
1181     /*
1182      * Some commands depend on register values, typically CR0. In case those
1183      * register values change while handling the command, the spec says it
1184      * is UNPREDICTABLE whether the command is interpreted under the new
1185      * or the old value.
1186      */
1187
1188 while (!smmuv3_q_empty(q)) {
1189 uint32_t pending = s->gerror ^ s->gerrorn;
1190 Cmd cmd;
1191
1192 trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
1193 Q_PROD_WRAP(q), Q_CONS_WRAP(q));
1194
1195 if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
1196 break;
1197 }
1198
1199 if (queue_read(q, &cmd) != MEMTX_OK) {
1200 cmd_error = SMMU_CERROR_ABT;
1201 break;
1202 }
1203
1204 type = CMD_TYPE(&cmd);
1205
1206 trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));
1207
1208 qemu_mutex_lock(&s->mutex);
1209 switch (type) {
1210 case SMMU_CMD_SYNC:
1211 if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
1212 smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
1213 }
1214 break;
1215 case SMMU_CMD_PREFETCH_CONFIG:
1216 case SMMU_CMD_PREFETCH_ADDR:
1217 break;
1218 case SMMU_CMD_CFGI_STE:
1219 {
1220 uint32_t sid = CMD_SID(&cmd);
1221 IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
1222 SMMUDevice *sdev;
1223
1224 if (CMD_SSEC(&cmd)) {
1225 cmd_error = SMMU_CERROR_ILL;
1226 break;
1227 }
1228
1229 if (!mr) {
1230 break;
1231 }
1232
1233 trace_smmuv3_cmdq_cfgi_ste(sid);
1234 sdev = container_of(mr, SMMUDevice, iommu);
1235 smmuv3_flush_config(sdev);
1236
1237 break;
1238 }
1239 case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
1240 {
1241 uint32_t sid = CMD_SID(&cmd), mask;
1242 uint8_t range = CMD_STE_RANGE(&cmd);
1243 SMMUSIDRange sid_range;
1244
1245 if (CMD_SSEC(&cmd)) {
1246 cmd_error = SMMU_CERROR_ILL;
1247 break;
1248 }
1249
1250 mask = (1ULL << (range + 1)) - 1;
1251 sid_range.start = sid & ~mask;
1252 sid_range.end = sid_range.start + mask;
1253
1254 trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
1255 g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
1256 &sid_range);
1257 break;
1258 }
1259 case SMMU_CMD_CFGI_CD:
1260 case SMMU_CMD_CFGI_CD_ALL:
1261 {
1262 uint32_t sid = CMD_SID(&cmd);
1263 IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
1264 SMMUDevice *sdev;
1265
1266 if (CMD_SSEC(&cmd)) {
1267 cmd_error = SMMU_CERROR_ILL;
1268 break;
1269 }
1270
1271 if (!mr) {
1272 break;
1273 }
1274
1275 trace_smmuv3_cmdq_cfgi_cd(sid);
1276 sdev = container_of(mr, SMMUDevice, iommu);
1277 smmuv3_flush_config(sdev);
1278 break;
1279 }
1280 case SMMU_CMD_TLBI_NH_ASID:
1281 {
1282 uint16_t asid = CMD_ASID(&cmd);
1283
1284 if (!STAGE1_SUPPORTED(s)) {
1285 cmd_error = SMMU_CERROR_ILL;
1286 break;
1287 }
1288
1289 trace_smmuv3_cmdq_tlbi_nh_asid(asid);
1290 smmu_inv_notifiers_all(&s->smmu_state);
1291 smmu_iotlb_inv_asid(bs, asid);
1292 break;
1293 }
1294 case SMMU_CMD_TLBI_NH_ALL:
1295 if (!STAGE1_SUPPORTED(s)) {
1296 cmd_error = SMMU_CERROR_ILL;
1297 break;
1298 }
1299 QEMU_FALLTHROUGH;
1300 case SMMU_CMD_TLBI_NSNH_ALL:
1301 trace_smmuv3_cmdq_tlbi_nh();
1302 smmu_inv_notifiers_all(&s->smmu_state);
1303 smmu_iotlb_inv_all(bs);
1304 break;
1305 case SMMU_CMD_TLBI_NH_VAA:
1306 case SMMU_CMD_TLBI_NH_VA:
1307 if (!STAGE1_SUPPORTED(s)) {
1308 cmd_error = SMMU_CERROR_ILL;
1309 break;
1310 }
1311 smmuv3_range_inval(bs, &cmd);
1312 break;
1313 case SMMU_CMD_TLBI_S12_VMALL:
1314 {
1315 uint16_t vmid = CMD_VMID(&cmd);
1316
1317 if (!STAGE2_SUPPORTED(s)) {
1318 cmd_error = SMMU_CERROR_ILL;
1319 break;
1320 }
1321
1322 trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
1323 smmu_inv_notifiers_all(&s->smmu_state);
1324 smmu_iotlb_inv_vmid(bs, vmid);
1325 break;
1326 }
1327 case SMMU_CMD_TLBI_S2_IPA:
1328 if (!STAGE2_SUPPORTED(s)) {
1329 cmd_error = SMMU_CERROR_ILL;
1330 break;
1331 }
1332             /*
1333              * As currently only either s1 or s2 is supported,
1334              * we can reuse the same function for s2.
1335              */
1336 smmuv3_range_inval(bs, &cmd);
1337 break;
1338 case SMMU_CMD_TLBI_EL3_ALL:
1339 case SMMU_CMD_TLBI_EL3_VA:
1340 case SMMU_CMD_TLBI_EL2_ALL:
1341 case SMMU_CMD_TLBI_EL2_ASID:
1342 case SMMU_CMD_TLBI_EL2_VA:
1343 case SMMU_CMD_TLBI_EL2_VAA:
1344 case SMMU_CMD_ATC_INV:
1345 case SMMU_CMD_PRI_RESP:
1346 case SMMU_CMD_RESUME:
1347 case SMMU_CMD_STALL_TERM:
1348 trace_smmuv3_unhandled_cmd(type);
1349 break;
1350 default:
1351 cmd_error = SMMU_CERROR_ILL;
1352 break;
1353 }
1354 qemu_mutex_unlock(&s->mutex);
1355 if (cmd_error) {
1356 if (cmd_error == SMMU_CERROR_ILL) {
1357 qemu_log_mask(LOG_GUEST_ERROR,
1358 "Illegal command type: %d\n", CMD_TYPE(&cmd));
1359 }
1360 break;
1361 }
1362 /*
1363 * We only increment the cons index after the completion of
1364 * the command. We do that because the SYNC returns immediately
1365          * and does not check the completion of previous commands.
1366 */
1367 queue_cons_incr(q);
1368 }
1369
1370 if (cmd_error) {
1371 trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
1372 smmu_write_cmdq_err(s, cmd_error);
1373 smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
1374 }
1375
1376 trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
1377 Q_PROD_WRAP(q), Q_CONS_WRAP(q));
1378
1379 return 0;
1380 }
1381
1382 static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
1383 uint64_t data, MemTxAttrs attrs)
1384 {
1385 switch (offset) {
1386 case A_GERROR_IRQ_CFG0:
1387 s->gerror_irq_cfg0 = data;
1388 return MEMTX_OK;
1389 case A_STRTAB_BASE:
1390 s->strtab_base = data;
1391 return MEMTX_OK;
1392 case A_CMDQ_BASE:
1393 s->cmdq.base = data;
1394 s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1395 if (s->cmdq.log2size > SMMU_CMDQS) {
1396 s->cmdq.log2size = SMMU_CMDQS;
1397 }
1398 return MEMTX_OK;
1399 case A_EVENTQ_BASE:
1400 s->eventq.base = data;
1401 s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1402 if (s->eventq.log2size > SMMU_EVENTQS) {
1403 s->eventq.log2size = SMMU_EVENTQS;
1404 }
1405 return MEMTX_OK;
1406 case A_EVENTQ_IRQ_CFG0:
1407 s->eventq_irq_cfg0 = data;
1408 return MEMTX_OK;
1409 default:
1410 qemu_log_mask(LOG_UNIMP,
1411 "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
1412 __func__, offset);
1413 return MEMTX_OK;
1414 }
1415 }
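/*
 * The 64-bit registers handled above are also reachable as two 32-bit
 * halves through smmu_writel() below, e.g. A_STRTAB_BASE and
 * A_STRTAB_BASE + 4.
 */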
1416
1417 static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
1418 uint64_t data, MemTxAttrs attrs)
1419 {
1420 switch (offset) {
1421 case A_CR0:
1422 s->cr[0] = data;
1423 s->cr0ack = data & ~SMMU_CR0_RESERVED;
1424 /* in case the command queue has been enabled */
1425 smmuv3_cmdq_consume(s);
1426 return MEMTX_OK;
1427 case A_CR1:
1428 s->cr[1] = data;
1429 return MEMTX_OK;
1430 case A_CR2:
1431 s->cr[2] = data;
1432 return MEMTX_OK;
1433 case A_IRQ_CTRL:
1434 s->irq_ctrl = data;
1435 return MEMTX_OK;
1436 case A_GERRORN:
1437 smmuv3_write_gerrorn(s, data);
1438 /*
1439          * By acknowledging the CMDQ_ERR, SW signals that commands can
1440          * be processed again.
1441 */
1442 smmuv3_cmdq_consume(s);
1443 return MEMTX_OK;
1444 case A_GERROR_IRQ_CFG0: /* 64b */
1445 s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
1446 return MEMTX_OK;
1447 case A_GERROR_IRQ_CFG0 + 4:
1448 s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
1449 return MEMTX_OK;
1450 case A_GERROR_IRQ_CFG1:
1451 s->gerror_irq_cfg1 = data;
1452 return MEMTX_OK;
1453 case A_GERROR_IRQ_CFG2:
1454 s->gerror_irq_cfg2 = data;
1455 return MEMTX_OK;
1456 case A_GBPA:
1457 /*
1458 * If UPDATE is not set, the write is ignored. This is the only
1459 * permitted behavior in SMMUv3.2 and later.
1460 */
1461 if (data & R_GBPA_UPDATE_MASK) {
1462 /* Ignore update bit as write is synchronous. */
1463 s->gbpa = data & ~R_GBPA_UPDATE_MASK;
1464 }
1465 return MEMTX_OK;
1466 case A_STRTAB_BASE: /* 64b */
1467 s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
1468 return MEMTX_OK;
1469 case A_STRTAB_BASE + 4:
1470 s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
1471 return MEMTX_OK;
1472 case A_STRTAB_BASE_CFG:
1473 s->strtab_base_cfg = data;
1474 if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
1475 s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
1476 s->features |= SMMU_FEATURE_2LVL_STE;
1477 }
1478 return MEMTX_OK;
1479 case A_CMDQ_BASE: /* 64b */
1480 s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
1481 s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1482 if (s->cmdq.log2size > SMMU_CMDQS) {
1483 s->cmdq.log2size = SMMU_CMDQS;
1484 }
1485 return MEMTX_OK;
1486 case A_CMDQ_BASE + 4: /* 64b */
1487 s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
1488 return MEMTX_OK;
1489 case A_CMDQ_PROD:
1490 s->cmdq.prod = data;
1491 smmuv3_cmdq_consume(s);
1492 return MEMTX_OK;
1493 case A_CMDQ_CONS:
1494 s->cmdq.cons = data;
1495 return MEMTX_OK;
1496 case A_EVENTQ_BASE: /* 64b */
1497 s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
1498 s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1499 if (s->eventq.log2size > SMMU_EVENTQS) {
1500 s->eventq.log2size = SMMU_EVENTQS;
1501 }
1502 return MEMTX_OK;
1503 case A_EVENTQ_BASE + 4:
1504 s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
1505 return MEMTX_OK;
1506 case A_EVENTQ_PROD:
1507 s->eventq.prod = data;
1508 return MEMTX_OK;
1509 case A_EVENTQ_CONS:
1510 s->eventq.cons = data;
1511 return MEMTX_OK;
1512 case A_EVENTQ_IRQ_CFG0: /* 64b */
1513 s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
1514 return MEMTX_OK;
1515 case A_EVENTQ_IRQ_CFG0 + 4:
1516 s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
1517 return MEMTX_OK;
1518 case A_EVENTQ_IRQ_CFG1:
1519 s->eventq_irq_cfg1 = data;
1520 return MEMTX_OK;
1521 case A_EVENTQ_IRQ_CFG2:
1522 s->eventq_irq_cfg2 = data;
1523 return MEMTX_OK;
1524 default:
1525 qemu_log_mask(LOG_UNIMP,
1526 "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
1527 __func__, offset);
1528 return MEMTX_OK;
1529 }
1530 }
1531
1532 static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
1533 unsigned size, MemTxAttrs attrs)
1534 {
1535 SMMUState *sys = opaque;
1536 SMMUv3State *s = ARM_SMMUV3(sys);
1537 MemTxResult r;
1538
1539 /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1540 offset &= ~0x10000;
1541
1542 switch (size) {
1543 case 8:
1544 r = smmu_writell(s, offset, data, attrs);
1545 break;
1546 case 4:
1547 r = smmu_writel(s, offset, data, attrs);
1548 break;
1549 default:
1550 r = MEMTX_ERROR;
1551 break;
1552 }
1553
1554 trace_smmuv3_write_mmio(offset, data, size, r);
1555 return r;
1556 }
1557
1558 static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
1559 uint64_t *data, MemTxAttrs attrs)
1560 {
1561 switch (offset) {
1562 case A_GERROR_IRQ_CFG0:
1563 *data = s->gerror_irq_cfg0;
1564 return MEMTX_OK;
1565 case A_STRTAB_BASE:
1566 *data = s->strtab_base;
1567 return MEMTX_OK;
1568 case A_CMDQ_BASE:
1569 *data = s->cmdq.base;
1570 return MEMTX_OK;
1571 case A_EVENTQ_BASE:
1572 *data = s->eventq.base;
1573 return MEMTX_OK;
1574 default:
1575 *data = 0;
1576 qemu_log_mask(LOG_UNIMP,
1577 "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
1578 __func__, offset);
1579 return MEMTX_OK;
1580 }
1581 }
1582
1583 static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
1584 uint64_t *data, MemTxAttrs attrs)
1585 {
1586 switch (offset) {
1587 case A_IDREGS ... A_IDREGS + 0x2f:
1588 *data = smmuv3_idreg(offset - A_IDREGS);
1589 return MEMTX_OK;
1590 case A_IDR0 ... A_IDR5:
1591 *data = s->idr[(offset - A_IDR0) / 4];
1592 return MEMTX_OK;
1593 case A_IIDR:
1594 *data = s->iidr;
1595 return MEMTX_OK;
1596 case A_AIDR:
1597 *data = s->aidr;
1598 return MEMTX_OK;
1599 case A_CR0:
1600 *data = s->cr[0];
1601 return MEMTX_OK;
1602 case A_CR0ACK:
1603 *data = s->cr0ack;
1604 return MEMTX_OK;
1605 case A_CR1:
1606 *data = s->cr[1];
1607 return MEMTX_OK;
1608 case A_CR2:
1609 *data = s->cr[2];
1610 return MEMTX_OK;
1611 case A_STATUSR:
1612 *data = s->statusr;
1613 return MEMTX_OK;
1614 case A_GBPA:
1615 *data = s->gbpa;
1616 return MEMTX_OK;
1617 case A_IRQ_CTRL:
1618 case A_IRQ_CTRL_ACK:
1619 *data = s->irq_ctrl;
1620 return MEMTX_OK;
1621 case A_GERROR:
1622 *data = s->gerror;
1623 return MEMTX_OK;
1624 case A_GERRORN:
1625 *data = s->gerrorn;
1626 return MEMTX_OK;
1627 case A_GERROR_IRQ_CFG0: /* 64b */
1628 *data = extract64(s->gerror_irq_cfg0, 0, 32);
1629 return MEMTX_OK;
1630 case A_GERROR_IRQ_CFG0 + 4:
1631 *data = extract64(s->gerror_irq_cfg0, 32, 32);
1632 return MEMTX_OK;
1633 case A_GERROR_IRQ_CFG1:
1634 *data = s->gerror_irq_cfg1;
1635 return MEMTX_OK;
1636 case A_GERROR_IRQ_CFG2:
1637 *data = s->gerror_irq_cfg2;
1638 return MEMTX_OK;
1639 case A_STRTAB_BASE: /* 64b */
1640 *data = extract64(s->strtab_base, 0, 32);
1641 return MEMTX_OK;
1642 case A_STRTAB_BASE + 4: /* 64b */
1643 *data = extract64(s->strtab_base, 32, 32);
1644 return MEMTX_OK;
1645 case A_STRTAB_BASE_CFG:
1646 *data = s->strtab_base_cfg;
1647 return MEMTX_OK;
1648 case A_CMDQ_BASE: /* 64b */
1649 *data = extract64(s->cmdq.base, 0, 32);
1650 return MEMTX_OK;
1651 case A_CMDQ_BASE + 4:
1652 *data = extract64(s->cmdq.base, 32, 32);
1653 return MEMTX_OK;
1654 case A_CMDQ_PROD:
1655 *data = s->cmdq.prod;
1656 return MEMTX_OK;
1657 case A_CMDQ_CONS:
1658 *data = s->cmdq.cons;
1659 return MEMTX_OK;
1660 case A_EVENTQ_BASE: /* 64b */
1661 *data = extract64(s->eventq.base, 0, 32);
1662 return MEMTX_OK;
1663 case A_EVENTQ_BASE + 4: /* 64b */
1664 *data = extract64(s->eventq.base, 32, 32);
1665 return MEMTX_OK;
1666 case A_EVENTQ_PROD:
1667 *data = s->eventq.prod;
1668 return MEMTX_OK;
1669 case A_EVENTQ_CONS:
1670 *data = s->eventq.cons;
1671 return MEMTX_OK;
1672 default:
1673 *data = 0;
1674 qemu_log_mask(LOG_UNIMP,
1675 "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
1676 __func__, offset);
1677 return MEMTX_OK;
1678 }
1679 }
1680
1681 static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
1682 unsigned size, MemTxAttrs attrs)
1683 {
1684 SMMUState *sys = opaque;
1685 SMMUv3State *s = ARM_SMMUV3(sys);
1686 MemTxResult r;
1687
1688 /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1689 offset &= ~0x10000;
1690
1691 switch (size) {
1692 case 8:
1693 r = smmu_readll(s, offset, data, attrs);
1694 break;
1695 case 4:
1696 r = smmu_readl(s, offset, data, attrs);
1697 break;
1698 default:
1699 r = MEMTX_ERROR;
1700 break;
1701 }
1702
1703 trace_smmuv3_read_mmio(offset, *data, size, r);
1704 return r;
1705 }
1706
1707 static const MemoryRegionOps smmu_mem_ops = {
1708 .read_with_attrs = smmu_read_mmio,
1709 .write_with_attrs = smmu_write_mmio,
1710 .endianness = DEVICE_LITTLE_ENDIAN,
1711 .valid = {
1712 .min_access_size = 4,
1713 .max_access_size = 8,
1714 },
1715 .impl = {
1716 .min_access_size = 4,
1717 .max_access_size = 8,
1718 },
1719 };
1720
1721 static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
1722 {
1723 int i;
1724
1725 for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
1726 sysbus_init_irq(dev, &s->irq[i]);
1727 }
1728 }
1729
1730 static void smmu_reset_hold(Object *obj, ResetType type)
1731 {
1732 SMMUv3State *s = ARM_SMMUV3(obj);
1733 SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1734
1735 if (c->parent_phases.hold) {
1736 c->parent_phases.hold(obj, type);
1737 }
1738
1739 smmuv3_init_regs(s);
1740 }
1741
1742 static void smmu_realize(DeviceState *d, Error **errp)
1743 {
1744 SMMUState *sys = ARM_SMMU(d);
1745 SMMUv3State *s = ARM_SMMUV3(sys);
1746 SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1747 SysBusDevice *dev = SYS_BUS_DEVICE(d);
1748 Error *local_err = NULL;
1749
1750 c->parent_realize(d, &local_err);
1751 if (local_err) {
1752 error_propagate(errp, local_err);
1753 return;
1754 }
1755
1756 qemu_mutex_init(&s->mutex);
1757
1758 memory_region_init_io(&sys->iomem, OBJECT(s),
1759 &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);
1760
1761 sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;
1762
1763 sysbus_init_mmio(dev, &sys->iomem);
1764
1765 smmu_init_irq(s, dev);
1766 }
1767
1768 static const VMStateDescription vmstate_smmuv3_queue = {
1769 .name = "smmuv3_queue",
1770 .version_id = 1,
1771 .minimum_version_id = 1,
1772 .fields = (const VMStateField[]) {
1773 VMSTATE_UINT64(base, SMMUQueue),
1774 VMSTATE_UINT32(prod, SMMUQueue),
1775 VMSTATE_UINT32(cons, SMMUQueue),
1776 VMSTATE_UINT8(log2size, SMMUQueue),
1777 VMSTATE_END_OF_LIST(),
1778 },
1779 };
1780
1781 static bool smmuv3_gbpa_needed(void *opaque)
1782 {
1783 SMMUv3State *s = opaque;
1784
1785     /* Only migrate GBPA if it differs from its reset value. */
1786 return s->gbpa != SMMU_GBPA_RESET_VAL;
1787 }
1788
1789 static const VMStateDescription vmstate_gbpa = {
1790 .name = "smmuv3/gbpa",
1791 .version_id = 1,
1792 .minimum_version_id = 1,
1793 .needed = smmuv3_gbpa_needed,
1794 .fields = (const VMStateField[]) {
1795 VMSTATE_UINT32(gbpa, SMMUv3State),
1796 VMSTATE_END_OF_LIST()
1797 }
1798 };
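/*
 * vmstate_gbpa is wired up as a subsection of vmstate_smmuv3 below:
 * it is only transmitted when smmuv3_gbpa_needed() returns true,
 * which keeps the migration stream compatible with older QEMU
 * versions whenever GBPA still holds its reset value.
 */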
1799
1800 static const VMStateDescription vmstate_smmuv3 = {
1801 .name = "smmuv3",
1802 .version_id = 1,
1803 .minimum_version_id = 1,
1804 .priority = MIG_PRI_IOMMU,
1805 .fields = (const VMStateField[]) {
1806 VMSTATE_UINT32(features, SMMUv3State),
1807 VMSTATE_UINT8(sid_size, SMMUv3State),
1808 VMSTATE_UINT8(sid_split, SMMUv3State),
1809
1810 VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
1811 VMSTATE_UINT32(cr0ack, SMMUv3State),
1812 VMSTATE_UINT32(statusr, SMMUv3State),
1813 VMSTATE_UINT32(irq_ctrl, SMMUv3State),
1814 VMSTATE_UINT32(gerror, SMMUv3State),
1815 VMSTATE_UINT32(gerrorn, SMMUv3State),
1816 VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
1817 VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
1818 VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
1819 VMSTATE_UINT64(strtab_base, SMMUv3State),
1820 VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
1821 VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
1822 VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
1823 VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),
1824
1825 VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
1826 VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
1827
1828 VMSTATE_END_OF_LIST(),
1829 },
1830 .subsections = (const VMStateDescription * const []) {
1831 &vmstate_gbpa,
1832 NULL
1833 }
1834 };
1835
1836 static Property smmuv3_properties[] = {
1837 /*
1838 * Stages of translation advertised.
1839 * "1": Stage 1
1840 * "2": Stage 2
1841 * Defaults to stage 1
1842 */
1843 DEFINE_PROP_STRING("stage", SMMUv3State, stage),
1844 DEFINE_PROP_END_OF_LIST()
1845 };
1846
1847 static void smmuv3_instance_init(Object *obj)
1848 {
1849 /* Nothing much to do here as of now */
1850 }
1851
1852 static void smmuv3_class_init(ObjectClass *klass, void *data)
1853 {
1854 DeviceClass *dc = DEVICE_CLASS(klass);
1855 ResettableClass *rc = RESETTABLE_CLASS(klass);
1856 SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);
1857
1858 dc->vmsd = &vmstate_smmuv3;
1859 resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL,
1860 &c->parent_phases);
1861 device_class_set_parent_realize(dc, smmu_realize,
1862 &c->parent_realize);
1863 device_class_set_props(dc, smmuv3_properties);
1864 }
1865
1866 static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
1867 IOMMUNotifierFlag old,
1868 IOMMUNotifierFlag new,
1869 Error **errp)
1870 {
1871 SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
1872 SMMUv3State *s3 = sdev->smmu;
1873 SMMUState *s = &(s3->smmu_state);
1874
1875 if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
1876 error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
1877 return -EINVAL;
1878 }
1879
1880 if (new & IOMMU_NOTIFIER_MAP) {
1881 error_setg(errp,
1882 "device %02x.%02x.%x requires iommu MAP notifier which is "
1883 "not currently supported", pci_bus_num(sdev->bus),
1884 PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
1885 return -EINVAL;
1886 }
1887
1888 if (old == IOMMU_NOTIFIER_NONE) {
1889 trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
1890 QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
1891 } else if (new == IOMMU_NOTIFIER_NONE) {
1892 trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
1893 QLIST_REMOVE(sdev, next);
1894 }
1895 return 0;
1896 }
1897
1898 static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
1899 void *data)
1900 {
1901 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
1902
1903 imrc->translate = smmuv3_translate;
1904 imrc->notify_flag_changed = smmuv3_notify_flag_changed;
1905 }
1906
1907 static const TypeInfo smmuv3_type_info = {
1908 .name = TYPE_ARM_SMMUV3,
1909 .parent = TYPE_ARM_SMMU,
1910 .instance_size = sizeof(SMMUv3State),
1911 .instance_init = smmuv3_instance_init,
1912 .class_size = sizeof(SMMUv3Class),
1913 .class_init = smmuv3_class_init,
1914 };
1915
1916 static const TypeInfo smmuv3_iommu_memory_region_info = {
1917 .parent = TYPE_IOMMU_MEMORY_REGION,
1918 .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
1919 .class_init = smmuv3_iommu_memory_region_class_init,
1920 };
1921
1922 static void smmuv3_register_types(void)
1923 {
1924 type_register(&smmuv3_type_info);
1925 type_register(&smmuv3_iommu_memory_region_info);
1926 }
1927
1928 type_init(smmuv3_register_types)