xref: /qemu/hw/arm/smmu-common.c (revision c7b64948)
1 /*
2  * Copyright (C) 2014-2016 Broadcom Corporation
3  * Copyright (c) 2017 Red Hat, Inc.
4  * Written by Prem Mallappa, Eric Auger
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * Author: Prem Mallappa <pmallapp@broadcom.com>
16  *
17  */
18 
19 #include "qemu/osdep.h"
20 #include "trace.h"
21 #include "exec/target_page.h"
22 #include "hw/core/cpu.h"
23 #include "hw/qdev-properties.h"
24 #include "qapi/error.h"
25 #include "qemu/jhash.h"
26 #include "qemu/module.h"
27 
28 #include "qemu/error-report.h"
29 #include "hw/arm/smmu-common.h"
30 #include "smmu-internal.h"
31 
32 /* IOTLB Management */
33 
/*
 * GHashTable hash function for SMMUIOTLBKey.
 * Mixes every key field (asid, vmid, level, tg and both halves of the
 * 64-bit iova) through a Jenkins hash so lookups distribute well.
 */
static guint smmu_iotlb_key_hash(gconstpointer v)
{
    SMMUIOTLBKey *key = (SMMUIOTLBKey *)v;
    uint32_t a, b, c;

    /* Jenkins hash */
    a = b = c = JHASH_INITVAL + sizeof(*key);
    a += key->asid + key->vmid + key->level + key->tg;
    b += extract64(key->iova, 0, 32);
    c += extract64(key->iova, 32, 32);

    __jhash_mix(a, b, c);
    __jhash_final(a, b, c);

    return c;
}
50 
51 static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2)
52 {
53     SMMUIOTLBKey *k1 = (SMMUIOTLBKey *)v1, *k2 = (SMMUIOTLBKey *)v2;
54 
55     return (k1->asid == k2->asid) && (k1->iova == k2->iova) &&
56            (k1->level == k2->level) && (k1->tg == k2->tg) &&
57            (k1->vmid == k2->vmid);
58 }
59 
60 SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint16_t vmid, uint64_t iova,
61                                 uint8_t tg, uint8_t level)
62 {
63     SMMUIOTLBKey key = {.asid = asid, .vmid = vmid, .iova = iova,
64                         .tg = tg, .level = level};
65 
66     return key;
67 }
68 
/*
 * smmu_iotlb_lookup - Look up the IOTLB for an entry covering @iova.
 *
 * Cached entries are keyed by (asid, vmid, iova, tg, level), so the
 * lookup probes each level from the walk start level down to level 3,
 * masking @iova to that level's subpage boundary, until a cached
 * page or block entry is found. Hit/miss counters in @cfg are updated
 * and traced. Returns NULL on miss.
 */
SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
                                SMMUTransTableInfo *tt, hwaddr iova)
{
    /* tg encodes the granule: 12-bit -> 1, 14-bit -> 2, 16-bit -> 3 */
    uint8_t tg = (tt->granule_sz - 10) / 2;
    uint8_t inputsize = 64 - tt->tsz;
    uint8_t stride = tt->granule_sz - 3;
    /* start level of the page table walk for this input size/granule */
    uint8_t level = 4 - (inputsize - 4) / stride;
    SMMUTLBEntry *entry = NULL;

    while (level <= 3) {
        uint64_t subpage_size = 1ULL << level_shift(level, tt->granule_sz);
        uint64_t mask = subpage_size - 1;
        SMMUIOTLBKey key;

        /* probe for an entry cached at this level's subpage boundary */
        key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid,
                                 iova & ~mask, tg, level);
        entry = g_hash_table_lookup(bs->iotlb, &key);
        if (entry) {
            break;
        }
        level++;
    }

    /* hits + misses >= 1 here, so the percentage division is safe */
    if (entry) {
        cfg->iotlb_hits++;
        trace_smmu_iotlb_lookup_hit(cfg->asid, cfg->s2cfg.vmid, iova,
                                    cfg->iotlb_hits, cfg->iotlb_misses,
                                    100 * cfg->iotlb_hits /
                                    (cfg->iotlb_hits + cfg->iotlb_misses));
    } else {
        cfg->iotlb_misses++;
        trace_smmu_iotlb_lookup_miss(cfg->asid, cfg->s2cfg.vmid, iova,
                                     cfg->iotlb_hits, cfg->iotlb_misses,
                                     100 * cfg->iotlb_hits /
                                     (cfg->iotlb_hits + cfg->iotlb_misses));
    }
    return entry;
}
107 
/*
 * smmu_iotlb_insert - Cache a new translation.
 *
 * If the cache is full (SMMU_IOTLB_MAX_SIZE) it is flushed entirely
 * first. Both the allocated key and @new become owned by the hash
 * table (freed by the g_free destructors installed at realize time).
 */
void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new)
{
    SMMUIOTLBKey *key = g_new0(SMMUIOTLBKey, 1);
    /* tg encodes the granule: 12-bit -> 1, 14-bit -> 2, 16-bit -> 3 */
    uint8_t tg = (new->granule - 10) / 2;

    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    *key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, new->entry.iova,
                              tg, new->level);
    trace_smmu_iotlb_insert(cfg->asid, cfg->s2cfg.vmid, new->entry.iova,
                            tg, new->level);
    g_hash_table_insert(bs->iotlb, key, new);
}
123 
/* Invalidate the whole IOTLB: drop every cached translation. */
void smmu_iotlb_inv_all(SMMUState *s)
{
    trace_smmu_iotlb_inv_all();
    g_hash_table_remove_all(s->iotlb);
}
129 
130 static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value,
131                                          gpointer user_data)
132 {
133     uint16_t asid = *(uint16_t *)user_data;
134     SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;
135 
136     return SMMU_IOTLB_ASID(*iotlb_key) == asid;
137 }
138 
139 static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value,
140                                          gpointer user_data)
141 {
142     uint16_t vmid = *(uint16_t *)user_data;
143     SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key;
144 
145     return SMMU_IOTLB_VMID(*iotlb_key) == vmid;
146 }
147 
/*
 * foreach_remove predicate for range invalidation: select entries that
 * match the ASID/VMID filters (negative value = wildcard, match any)
 * and whose address range overlaps the invalidated range, in either
 * direction (entry contains the target iova, or the invalidation range
 * contains the entry's iova).
 */
static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value,
                                              gpointer user_data)
{
    SMMUTLBEntry *iter = (SMMUTLBEntry *)value;
    IOMMUTLBEntry *entry = &iter->entry;
    SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data;
    SMMUIOTLBKey iotlb_key = *(SMMUIOTLBKey *)key;

    if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) {
        return false;
    }
    if (info->vmid >= 0 && info->vmid != SMMU_IOTLB_VMID(iotlb_key)) {
        return false;
    }
    return ((info->iova & ~entry->addr_mask) == entry->iova) ||
           ((entry->iova & ~info->mask) == info->iova);
}
165 
166 void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
167                          uint8_t tg, uint64_t num_pages, uint8_t ttl)
168 {
169     /* if tg is not set we use 4KB range invalidation */
170     uint8_t granule = tg ? tg * 2 + 10 : 12;
171 
172     if (ttl && (num_pages == 1) && (asid >= 0)) {
173         SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, iova, tg, ttl);
174 
175         if (g_hash_table_remove(s->iotlb, &key)) {
176             return;
177         }
178         /*
179          * if the entry is not found, let's see if it does not
180          * belong to a larger IOTLB entry
181          */
182     }
183 
184     SMMUIOTLBPageInvInfo info = {
185         .asid = asid, .iova = iova,
186         .vmid = vmid,
187         .mask = (num_pages * 1 << granule) - 1};
188 
189     g_hash_table_foreach_remove(s->iotlb,
190                                 smmu_hash_remove_by_asid_vmid_iova,
191                                 &info);
192 }
193 
/* Invalidate every IOTLB entry tagged with @asid. */
void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid)
{
    trace_smmu_iotlb_inv_asid(asid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid);
}
199 
200 inline void smmu_iotlb_inv_vmid(SMMUState *s, uint16_t vmid)
201 {
202     trace_smmu_iotlb_inv_vmid(vmid);
203     g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid);
204 }
205 
206 /* VMSAv8-64 Translation */
207 
208 /**
209  * get_pte - Get the content of a page table entry located at
210  * @base_addr[@index]
211  */
/*
 * Read the 64-bit descriptor at @baseaddr[@index] into *@pte via DMA.
 * On a failed read, fills @info with a walk external-abort event and
 * returns -EINVAL; returns 0 on success.
 */
static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
                   SMMUPTWEventInfo *info)
{
    int ret;
    dma_addr_t addr = baseaddr + index * sizeof(*pte);

    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, pte, sizeof(*pte),
                          MEMTXATTRS_UNSPECIFIED);

    if (ret != MEMTX_OK) {
        info->type = SMMU_PTW_ERR_WALK_EABT;
        info->addr = addr;
        return -EINVAL;
    }
    trace_smmu_get_pte(baseaddr, index, addr, *pte);
    return 0;
}
230 
231 /* VMSAv8-64 Translation Table Format Descriptor Decoding */
232 
/**
 * get_page_pte_address - returns the L3 descriptor output address,
 * ie. the page frame
 * ARM ARM spec: Figure D4-17 VMSAv8-64 level 3 descriptor format
 *
 * The output address field starts at bit @granule_sz of the descriptor.
 */
static inline hwaddr get_page_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}
242 
/**
 * get_table_pte_address - return table descriptor output address,
 * ie. address of next level table
 * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats
 *
 * The next-level table address field starts at bit @granule_sz.
 */
static inline hwaddr get_table_pte_address(uint64_t pte, int granule_sz)
{
    return PTE_ADDRESS(pte, granule_sz);
}
252 
253 /**
254  * get_block_pte_address - return block descriptor output address and block size
255  * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats
256  */
257 static inline hwaddr get_block_pte_address(uint64_t pte, int level,
258                                            int granule_sz, uint64_t *bsz)
259 {
260     int n = level_shift(level, granule_sz);
261 
262     *bsz = 1ULL << n;
263     return PTE_ADDRESS(pte, n);
264 }
265 
/*
 * select_tt - Pick the translation table (TTB0 or TTB1 region) covering
 * @iova, honouring Top Byte Ignore: bit 55 selects which TBI flag
 * applies, and when TBI is active the top 8 bits are excluded from the
 * region-selection check. Returns NULL when @iova falls in the gap
 * between the two regions (Translation fault).
 */
SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
{
    bool tbi = extract64(iova, 55, 1) ? TBI1(cfg->tbi) : TBI0(cfg->tbi);
    uint8_t tbi_byte = tbi * 8;

    if (cfg->tt[0].tsz &&
        !extract64(iova, 64 - cfg->tt[0].tsz, cfg->tt[0].tsz - tbi_byte)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        return &cfg->tt[0];
    } else if (cfg->tt[1].tsz &&
        sextract64(iova, 64 - cfg->tt[1].tsz, cfg->tt[1].tsz - tbi_byte) == -1) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        return &cfg->tt[1];
    } else if (!cfg->tt[0].tsz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        return &cfg->tt[0];
    } else if (!cfg->tt[1].tsz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        return &cfg->tt[1];
    }
    /* in the gap between the two regions, this is a Translation fault */
    return NULL;
}
289 
290 /**
291  * smmu_ptw_64_s1 - VMSAv8-64 Walk of the page tables for a given IOVA
292  * @cfg: translation config
293  * @iova: iova to translate
294  * @perm: access type
295  * @tlbe: SMMUTLBEntry (out)
296  * @info: handle to an error info
297  *
298  * Return 0 on success, < 0 on error. In case of error, @info is filled
299  * and tlbe->perm is set to IOMMU_NONE.
300  * Upon success, @tlbe is filled with translated_addr and entry
301  * permission rights.
302  */
static int smmu_ptw_64_s1(SMMUTransCfg *cfg,
                          dma_addr_t iova, IOMMUAccessFlags perm,
                          SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    dma_addr_t baseaddr, indexmask;
    int stage = cfg->stage;
    /* pick the TTB0/TTB1 region covering @iova (NULL => Translation fault) */
    SMMUTransTableInfo *tt = select_tt(cfg, iova);
    uint8_t level, granule_sz, inputsize, stride;

    if (!tt || tt->disabled) {
        info->type = SMMU_PTW_ERR_TRANSLATION;
        goto error;
    }

    granule_sz = tt->granule_sz;
    stride = VMSA_STRIDE(granule_sz);
    inputsize = 64 - tt->tsz;
    /* start level of the walk derived from input size and stride */
    level = 4 - (inputsize - 4) / stride;
    indexmask = VMSA_IDXMSK(inputsize, stride, level);
    baseaddr = extract64(tt->ttb, 0, 48);
    baseaddr &= ~indexmask;

    while (level < VMSA_LEVELS) {
        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
        uint64_t mask = subpage_size - 1;
        uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz);
        uint64_t pte, gpa;
        dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
        uint8_t ap;

        if (get_pte(baseaddr, offset, &pte, info)) {
                goto error;
        }
        trace_smmu_ptw_level(stage, level, iova, subpage_size,
                             baseaddr, offset, pte);

        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
            trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
                                       pte_addr, offset, pte);
            break;
        }

        if (is_table_pte(pte, level)) {
            ap = PTE_APTABLE(pte);

            /* table-level permission check, skipped when tt->had is set */
            if (is_permission_fault(ap, perm) && !tt->had) {
                info->type = SMMU_PTW_ERR_PERMISSION;
                goto error;
            }
            /* descend to the next-level table */
            baseaddr = get_table_pte_address(pte, granule_sz);
            level++;
            continue;
        } else if (is_page_pte(pte, level)) {
            gpa = get_page_pte_address(pte, granule_sz);
            trace_smmu_ptw_page_pte(stage, level, iova,
                                    baseaddr, pte_addr, pte, gpa);
        } else {
            uint64_t block_size;

            gpa = get_block_pte_address(pte, level, granule_sz,
                                        &block_size);
            trace_smmu_ptw_block_pte(stage, level, baseaddr,
                                     pte_addr, pte, iova, gpa,
                                     block_size >> 20);
        }
        /* leaf-level permission check */
        ap = PTE_AP(pte);
        if (is_permission_fault(ap, perm)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            goto error;
        }

        /* leaf descriptor found: fill the returned TLB entry */
        tlbe->entry.translated_addr = gpa;
        tlbe->entry.iova = iova & ~mask;
        tlbe->entry.addr_mask = mask;
        tlbe->entry.perm = PTE_AP_TO_PERM(ap);
        tlbe->level = level;
        tlbe->granule = granule_sz;
        return 0;
    }
    /* invalid/reserved descriptor or walk exhausted all levels */
    info->type = SMMU_PTW_ERR_TRANSLATION;

error:
    info->stage = 1;
    tlbe->entry.perm = IOMMU_NONE;
    return -EINVAL;
}
389 
390 /**
391  * smmu_ptw_64_s2 - VMSAv8-64 Walk of the page tables for a given ipa
392  * for stage-2.
393  * @cfg: translation config
394  * @ipa: ipa to translate
395  * @perm: access type
396  * @tlbe: SMMUTLBEntry (out)
397  * @info: handle to an error info
398  *
399  * Return 0 on success, < 0 on error. In case of error, @info is filled
400  * and tlbe->perm is set to IOMMU_NONE.
401  * Upon success, @tlbe is filled with translated_addr and entry
402  * permission rights.
403  */
static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
                          dma_addr_t ipa, IOMMUAccessFlags perm,
                          SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    const int stage = 2;
    int granule_sz = cfg->s2cfg.granule_sz;
    /* ARM DDI0487I.a: Table D8-7. */
    int inputsize = 64 - cfg->s2cfg.tsz;
    int level = get_start_level(cfg->s2cfg.sl0, granule_sz);
    int stride = VMSA_STRIDE(granule_sz);
    /* index of the concatenated start-level table holding this IPA */
    int idx = pgd_concat_idx(level, granule_sz, ipa);
    /*
     * Get the ttb from concatenated structure.
     * The offset is the idx * size of each ttb(number of ptes * (sizeof(pte))
     */
    uint64_t baseaddr = extract64(cfg->s2cfg.vttb, 0, 48) + (1 << stride) *
                                  idx * sizeof(uint64_t);
    dma_addr_t indexmask = VMSA_IDXMSK(inputsize, stride, level);

    baseaddr &= ~indexmask;

    /*
     * On input, a stage 2 Translation fault occurs if the IPA is outside the
     * range configured by the relevant S2T0SZ field of the STE.
     */
    if (ipa >= (1ULL << inputsize)) {
        info->type = SMMU_PTW_ERR_TRANSLATION;
        goto error;
    }

    while (level < VMSA_LEVELS) {
        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
        uint64_t mask = subpage_size - 1;
        uint32_t offset = iova_level_offset(ipa, inputsize, level, granule_sz);
        uint64_t pte, gpa;
        dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
        uint8_t s2ap;

        if (get_pte(baseaddr, offset, &pte, info)) {
                goto error;
        }
        trace_smmu_ptw_level(stage, level, ipa, subpage_size,
                             baseaddr, offset, pte);
        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
            trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
                                       pte_addr, offset, pte);
            break;
        }

        if (is_table_pte(pte, level)) {
            /* descend to the next-level table */
            baseaddr = get_table_pte_address(pte, granule_sz);
            level++;
            continue;
        } else if (is_page_pte(pte, level)) {
            gpa = get_page_pte_address(pte, granule_sz);
            trace_smmu_ptw_page_pte(stage, level, ipa,
                                    baseaddr, pte_addr, pte, gpa);
        } else {
            uint64_t block_size;

            gpa = get_block_pte_address(pte, level, granule_sz,
                                        &block_size);
            trace_smmu_ptw_block_pte(stage, level, baseaddr,
                                     pte_addr, pte, ipa, gpa,
                                     block_size >> 20);
        }

        /*
         * If S2AFFD and PTE.AF are 0 => fault. (5.2. Stream Table Entry)
         * An Access fault takes priority over a Permission fault.
         */
        if (!PTE_AF(pte) && !cfg->s2cfg.affd) {
            info->type = SMMU_PTW_ERR_ACCESS;
            goto error;
        }

        s2ap = PTE_AP(pte);
        if (is_permission_fault_s2(s2ap, perm)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            goto error;
        }

        /*
         * The address output from the translation causes a stage 2 Address
         * Size fault if it exceeds the effective PA output range.
         */
        if (gpa >= (1ULL << cfg->s2cfg.eff_ps)) {
            info->type = SMMU_PTW_ERR_ADDR_SIZE;
            goto error;
        }

        /* leaf descriptor found: fill the returned TLB entry */
        tlbe->entry.translated_addr = gpa;
        tlbe->entry.iova = ipa & ~mask;
        tlbe->entry.addr_mask = mask;
        tlbe->entry.perm = s2ap;
        tlbe->level = level;
        tlbe->granule = granule_sz;
        return 0;
    }
    /* invalid/reserved descriptor or walk exhausted all levels */
    info->type = SMMU_PTW_ERR_TRANSLATION;

error:
    info->stage = 2;
    tlbe->entry.perm = IOMMU_NONE;
    return -EINVAL;
}
510 
511 /**
512  * smmu_ptw - Walk the page tables for an IOVA, according to @cfg
513  *
514  * @cfg: translation configuration
515  * @iova: iova to translate
516  * @perm: tentative access type
517  * @tlbe: returned entry
518  * @info: ptw event handle
519  *
520  * return 0 on success
521  */
522 int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
523              SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
524 {
525     if (cfg->stage == 1) {
526         return smmu_ptw_64_s1(cfg, iova, perm, tlbe, info);
527     } else if (cfg->stage == 2) {
528         /*
529          * If bypassing stage 1(or unimplemented), the input address is passed
530          * directly to stage 2 as IPA. If the input address of a transaction
531          * exceeds the size of the IAS, a stage 1 Address Size fault occurs.
532          * For AA64, IAS = OAS according to (IHI 0070.E.a) "3.4 Address sizes"
533          */
534         if (iova >= (1ULL << cfg->oas)) {
535             info->type = SMMU_PTW_ERR_ADDR_SIZE;
536             info->stage = 1;
537             tlbe->entry.perm = IOMMU_NONE;
538             return -EINVAL;
539         }
540 
541         return smmu_ptw_64_s2(cfg, iova, perm, tlbe, info);
542     }
543 
544     g_assert_not_reached();
545 }
546 
547 /**
548  * The bus number is used for lookup when SID based invalidation occurs.
549  * In that case we lazily populate the SMMUPciBus array from the bus hash
550  * table. At the time the SMMUPciBus is created (smmu_find_add_as), the bus
551  * numbers may not be always initialized yet.
552  */
553 SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num)
554 {
555     SMMUPciBus *smmu_pci_bus = s->smmu_pcibus_by_bus_num[bus_num];
556     GHashTableIter iter;
557 
558     if (smmu_pci_bus) {
559         return smmu_pci_bus;
560     }
561 
562     g_hash_table_iter_init(&iter, s->smmu_pcibus_by_busptr);
563     while (g_hash_table_iter_next(&iter, NULL, (void **)&smmu_pci_bus)) {
564         if (pci_bus_num(smmu_pci_bus->bus) == bus_num) {
565             s->smmu_pcibus_by_bus_num[bus_num] = smmu_pci_bus;
566             return smmu_pci_bus;
567         }
568     }
569 
570     return NULL;
571 }
572 
/*
 * smmu_find_add_as - Return (lazily creating) the per-device address
 * space behind the SMMU for @devfn on @bus. Registered with the PCI
 * layer via pci_setup_iommu() in smmu_base_realize().
 */
static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn)
{
    SMMUState *s = opaque;
    SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_pcibus_by_busptr, bus);
    SMMUDevice *sdev;
    /* monotonic counter to keep IOMMU memory region names unique */
    static unsigned int index;

    if (!sbus) {
        /* first device seen on this bus: allocate its SMMUPciBus record */
        sbus = g_malloc0(sizeof(SMMUPciBus) +
                         sizeof(SMMUDevice *) * SMMU_PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->smmu_pcibus_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        char *name = g_strdup_printf("%s-%d-%d", s->mrtypename, devfn, index++);

        sdev = sbus->pbdev[devfn] = g_new0(SMMUDevice, 1);

        sdev->smmu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        /* the IOMMU memory region type is model specific (s->mrtypename) */
        memory_region_init_iommu(&sdev->iommu, sizeof(sdev->iommu),
                                 s->mrtypename,
                                 OBJECT(s), name, UINT64_MAX);
        address_space_init(&sdev->as,
                           MEMORY_REGION(&sdev->iommu), name);
        trace_smmu_add_mr(name);
        g_free(name);
    }

    return &sdev->as;
}
608 
609 IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid)
610 {
611     uint8_t bus_n, devfn;
612     SMMUPciBus *smmu_bus;
613     SMMUDevice *smmu;
614 
615     bus_n = PCI_BUS_NUM(sid);
616     smmu_bus = smmu_find_smmu_pcibus(s, bus_n);
617     if (smmu_bus) {
618         devfn = SMMU_PCI_DEVFN(sid);
619         smmu = smmu_bus->pbdev[devfn];
620         if (smmu) {
621             return &smmu->iommu;
622         }
623     }
624     return NULL;
625 }
626 
627 /* Unmap all notifiers attached to @mr */
/* Unmap all notifiers attached to @mr */
static void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr)
{
    IOMMUNotifier *n;

    trace_smmu_inv_notifiers_mr(mr->parent_obj.name);
    /* signal an unmap of each notifier's whole registered range */
    IOMMU_NOTIFIER_FOREACH(n, mr) {
        memory_region_unmap_iommu_notifier_range(n);
    }
}
637 
638 /* Unmap all notifiers of all mr's */
/* Unmap all notifiers of all mr's */
void smmu_inv_notifiers_all(SMMUState *s)
{
    SMMUDevice *sdev;

    /* only devices that registered notifiers are on this list */
    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        smmu_inv_notifiers_mr(&sdev->iommu);
    }
}
647 
/*
 * Realize handler shared by all SMMU models: chain up to the parent
 * realize, allocate the config and IOTLB caches, then hook the SMMU
 * into the primary PCI bus. Fails if no primary bus was set.
 */
static void smmu_base_realize(DeviceState *dev, Error **errp)
{
    SMMUState *s = ARM_SMMU(dev);
    SMMUBaseClass *sbc = ARM_SMMU_GET_CLASS(dev);
    Error *local_err = NULL;

    sbc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    /* config cache: keys are owned by callers, values freed on removal */
    s->configs = g_hash_table_new_full(NULL, NULL, NULL, g_free);
    /* IOTLB: both keys and values are heap-allocated and freed on removal */
    s->iotlb = g_hash_table_new_full(smmu_iotlb_key_hash, smmu_iotlb_key_equal,
                                     g_free, g_free);
    s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, smmu_find_add_as, s);
    } else {
        error_setg(errp, "SMMU is not attached to any PCI bus!");
    }
}
670 
/* Resettable "hold" phase: drop all cached configs and IOTLB entries. */
static void smmu_base_reset_hold(Object *obj)
{
    SMMUState *s = ARM_SMMU(obj);

    g_hash_table_remove_all(s->configs);
    g_hash_table_remove_all(s->iotlb);
}
678 
static Property smmu_dev_properties[] = {
    /* bus number the SMMU is attached to (default 0) */
    DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0),
    /* PCI root bus the SMMU translates for; mandatory (checked at realize) */
    DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus,
                     TYPE_PCI_BUS, PCIBus *),
    DEFINE_PROP_END_OF_LIST(),
};
685 
/*
 * Class init for the abstract SMMU base type: install properties,
 * wrap the subclass realize so smmu_base_realize runs around it, and
 * register the reset "hold" phase handler.
 */
static void smmu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUBaseClass *sbc = ARM_SMMU_CLASS(klass);

    device_class_set_props(dc, smmu_dev_properties);
    /* save the original realize in sbc->parent_realize for chaining */
    device_class_set_parent_realize(dc, smmu_base_realize,
                                    &sbc->parent_realize);
    rc->phases.hold = smmu_base_reset_hold;
}
697 
/* Abstract base QOM type; concrete SMMU models derive from it. */
static const TypeInfo smmu_base_info = {
    .name          = TYPE_ARM_SMMU,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SMMUState),
    .class_data    = NULL,
    .class_size    = sizeof(SMMUBaseClass),
    .class_init    = smmu_base_class_init,
    .abstract      = true,
};
707 
/* Register the abstract SMMU base type with QOM at module init. */
static void smmu_base_register_types(void)
{
    type_register_static(&smmu_base_info);
}

type_init(smmu_base_register_types)
714 
715