xref: /qemu/hw/xen/xen_pt_config_init.c (revision bfa3ab61)
1 /*
2  * Copyright (c) 2007, Neocleus Corporation.
3  * Copyright (c) 2007, Intel Corporation.
4  *
5  * This work is licensed under the terms of the GNU GPL, version 2.  See
6  * the COPYING file in the top-level directory.
7  *
8  * Alex Novik <alex@neocleus.com>
9  * Allen Kay <allen.m.kay@intel.com>
10  * Guy Zana <guy@neocleus.com>
11  *
12  * This file implements direct PCI assignment to an HVM guest
13  */
14 
15 #include "qemu/timer.h"
16 #include "hw/xen/xen_backend.h"
17 #include "xen_pt.h"
18 
19 #define XEN_PT_MERGE_VALUE(value, data, val_mask) \
20     (((value) & (val_mask)) | ((data) & ~(val_mask)))
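/*
 * For example, with value = 0x12, data = 0xAB and val_mask = 0x0F:
 *   (0x12 & 0x0F) | (0xAB & ~0x0F) = 0x02 | 0xA0 = 0xA2
 * i.e. the bits selected by val_mask are taken from 'value' and every other
 * bit is taken from 'data'.
 */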
21 
22 #define XEN_PT_INVALID_REG          0xFFFFFFFF      /* invalid register value */
23 
24 /* prototype */
25 
26 static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
27                                uint32_t real_offset, uint32_t *data);
28 
29 
30 /* helper */
31 
32 /* A return value of 1 means the capability should NOT be exposed to the guest. */
33 static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
34 {
35     switch (grp_id) {
36     case PCI_CAP_ID_EXP:
37         /* Per 'Table 9.7. VF PCIe Configuration Space' of the Intel 82599
38          * 10GbE Controller datasheet, the PCI Express Capability Structure
39          * of the VF looks trivial, e.g., the PCI Express Capabilities
40          * Register is 0, so the Capability Version is 0 and
41          * xen_pt_pcie_size_init() would fail.  We should not try to expose
42          * it to the guest.
43          *
44          * The datasheet is available at
45          * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
46          */
50         if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
51             d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
52             return 1;
53         }
54         break;
55     }
56     return 0;
57 }
58 
59 /* find emulated register group entry */
60 XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
61 {
62     XenPTRegGroup *entry = NULL;
63 
64     /* find register group entry */
65     QLIST_FOREACH(entry, &s->reg_grps, entries) {
66         /* check address */
67         if ((entry->base_offset <= address)
68             && ((entry->base_offset + entry->size) > address)) {
69             return entry;
70         }
71     }
72 
73     /* group entry not found */
74     return NULL;
75 }
76 
77 /* find emulated register entry */
78 XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
79 {
80     XenPTReg *reg_entry = NULL;
81     XenPTRegInfo *reg = NULL;
82     uint32_t real_offset = 0;
83 
84     /* find register entry */
85     QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
86         reg = reg_entry->reg;
87         real_offset = reg_grp->base_offset + reg->offset;
88         /* check address */
89         if ((real_offset <= address)
90             && ((real_offset + reg->size) > address)) {
91             return reg_entry;
92         }
93     }
94 
95     return NULL;
96 }
97 
98 static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
99                                      const XenPTRegInfo *reg,
100                                      uint32_t valid_mask)
101 {
102     uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);
103 
104     if (!s->permissive) {
105         throughable_mask &= ~reg->res_mask;
106     }
107 
108     return throughable_mask & valid_mask;
109 }
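/*
 * For example, the Command register entry in xen_pt_emu_reg_header0 below has
 * emu_mask 0x0743, res_mask 0xF880 and no ro_mask, so in non-permissive mode
 * the throughable mask (before ANDing with valid_mask) is
 * ~(0x0743 | 0x0000) & ~0xF880 = 0x003C: only the Bus Master, Special Cycles,
 * Memory Write and Invalidate, and VGA Palette Snoop bits are written straight
 * through to the real device.
 */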
110 
111 /****************
112  * general register functions
113  */
114 
115 /* register initialization function */
116 
117 static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
118                                   XenPTRegInfo *reg, uint32_t real_offset,
119                                   uint32_t *data)
120 {
121     *data = reg->init_val;
122     return 0;
123 }
124 
125 /* Read register functions */
126 
127 static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
128                                 uint8_t *value, uint8_t valid_mask)
129 {
130     XenPTRegInfo *reg = cfg_entry->reg;
131     uint8_t valid_emu_mask = 0;
132 
133     /* emulate byte register */
134     valid_emu_mask = reg->emu_mask & valid_mask;
135     *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
136 
137     return 0;
138 }
139 static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
140                                 uint16_t *value, uint16_t valid_mask)
141 {
142     XenPTRegInfo *reg = cfg_entry->reg;
143     uint16_t valid_emu_mask = 0;
144 
145     /* emulate word register */
146     valid_emu_mask = reg->emu_mask & valid_mask;
147     *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
148 
149     return 0;
150 }
151 static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
152                                 uint32_t *value, uint32_t valid_mask)
153 {
154     XenPTRegInfo *reg = cfg_entry->reg;
155     uint32_t valid_emu_mask = 0;
156 
157     /* emulate long register */
158     valid_emu_mask = reg->emu_mask & valid_mask;
159     *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
160 
161     return 0;
162 }
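/*
 * On a read, the bits covered by emu_mask are presented from the emulated
 * copy in cfg_entry->data and every other bit from the value read from the
 * real device.  For example, with a device value of 0xAB, an emulated value
 * of 0x12, emu_mask = 0xF0 and valid_mask = 0xFF, the guest sees
 * (0xAB & 0x0F) | (0x12 & 0xF0) = 0x1B.
 */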
163 
164 /* Write register functions */
165 
166 static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
167                                  uint8_t *val, uint8_t dev_value,
168                                  uint8_t valid_mask)
169 {
170     XenPTRegInfo *reg = cfg_entry->reg;
171     uint8_t writable_mask = 0;
172     uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
173 
174     /* modify the emulated register */
175     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
176     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
177 
178     /* create value for writing to I/O device register */
179     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
180 
181     return 0;
182 }
183 static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
184                                  uint16_t *val, uint16_t dev_value,
185                                  uint16_t valid_mask)
186 {
187     XenPTRegInfo *reg = cfg_entry->reg;
188     uint16_t writable_mask = 0;
189     uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
190 
191     /* modify the emulated register */
192     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
193     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
194 
195     /* create value for writing to I/O device register */
196     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
197 
198     return 0;
199 }
200 static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
201                                  uint32_t *val, uint32_t dev_value,
202                                  uint32_t valid_mask)
203 {
204     XenPTRegInfo *reg = cfg_entry->reg;
205     uint32_t writable_mask = 0;
206     uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
207 
208     /* modify the emulated register */
209     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
210     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
211 
212     /* create value for writing to I/O device register */
213     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
214 
215     return 0;
216 }
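/*
 * On a write, writable_mask selects the guest bits that update the emulated
 * copy, while throughable_mask selects the guest bits that are forwarded to
 * the real device; every other device bit keeps its current hardware value.
 * As a hypothetical example with emu_mask = 0x0F, ro_mask = 0x03, res_mask = 0
 * and valid_mask = 0xFF: a guest write of 0xFF to a device currently reading
 * 0x00 (emulated copy also 0x00) stores 0x0C in cfg_entry->data and sends
 * 0xF0 to the device.
 */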
217 
218 
219 /* XenPTRegInfo declaration
220  * - only for emulated registers (either partially or fully emulated).
221  * - for pass-through registers that need special behavior (like interacting
222  *   with other components), set emu_mask to all 0s and specify the r/w funcs.
223  * - do NOT use all Fs for init_val, otherwise the table will not be registered.
224  */
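/*
 * For example, the Vendor ID entry in xen_pt_emu_reg_header0 below sets both
 * ro_mask and emu_mask to 0xFFFF, so that register is fully emulated and
 * read-only from the guest's point of view.
 */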
225 
226 /********************
227  * Header Type0
228  */
229 
230 static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
231                                   XenPTRegInfo *reg, uint32_t real_offset,
232                                   uint32_t *data)
233 {
234     *data = s->real_device.vendor_id;
235     return 0;
236 }
237 static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
238                                   XenPTRegInfo *reg, uint32_t real_offset,
239                                   uint32_t *data)
240 {
241     *data = s->real_device.device_id;
242     return 0;
243 }
244 static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
245                                   XenPTRegInfo *reg, uint32_t real_offset,
246                                   uint32_t *data)
247 {
248     XenPTRegGroup *reg_grp_entry = NULL;
249     XenPTReg *reg_entry = NULL;
250     uint32_t reg_field = 0;
251 
252     /* find Header register group */
253     reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
254     if (reg_grp_entry) {
255         /* find Capabilities Pointer register */
256         reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
257         if (reg_entry) {
258             /* check Capabilities Pointer register */
259             if (reg_entry->data) {
260                 reg_field |= PCI_STATUS_CAP_LIST;
261             } else {
262                 reg_field &= ~PCI_STATUS_CAP_LIST;
263             }
264         } else {
265             xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
266                                      " for Capabilities Pointer register."
267                                      " (%s)\n", __func__);
268             return -1;
269         }
270     } else {
271         xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
272                                  " for Header. (%s)\n", __func__);
273         return -1;
274     }
275 
276     *data = reg_field;
277     return 0;
278 }
279 static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
280                                        XenPTRegInfo *reg, uint32_t real_offset,
281                                        uint32_t *data)
282 {
283     /* use the initial value and force the Multi-Function bit (0x80) on */
284     *data = reg->init_val | 0x80;
285     return 0;
286 }
287 
288 /* initialize Interrupt Pin register */
289 static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
290                                   XenPTRegInfo *reg, uint32_t real_offset,
291                                   uint32_t *data)
292 {
293     *data = xen_pt_pci_read_intx(s);
294     return 0;
295 }
296 
297 /* Command register */
298 static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
299                                 uint16_t *val, uint16_t dev_value,
300                                 uint16_t valid_mask)
301 {
302     XenPTRegInfo *reg = cfg_entry->reg;
303     uint16_t writable_mask = 0;
304     uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
305 
306     /* modify the emulated register */
307     writable_mask = ~reg->ro_mask & valid_mask;
308     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
309 
310     /* create value for writing to I/O device register */
311     if (*val & PCI_COMMAND_INTX_DISABLE) {
312         throughable_mask |= PCI_COMMAND_INTX_DISABLE;
313     } else {
314         if (s->machine_irq) {
315             throughable_mask |= PCI_COMMAND_INTX_DISABLE;
316         }
317     }
318 
319     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
320 
321     return 0;
322 }
323 
324 /* BAR */
325 #define XEN_PT_BAR_MEM_RO_MASK    0x0000000F  /* BAR ReadOnly mask(Memory) */
326 #define XEN_PT_BAR_MEM_EMU_MASK   0xFFFFFFF0  /* BAR emul mask(Memory) */
327 #define XEN_PT_BAR_IO_RO_MASK     0x00000003  /* BAR ReadOnly mask(I/O) */
328 #define XEN_PT_BAR_IO_EMU_MASK    0xFFFFFFFC  /* BAR emul mask(I/O) */
329 
330 static bool is_64bit_bar(PCIIORegion *r)
331 {
332     return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
333 }
334 
335 static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
336 {
337     if (is_64bit_bar(r)) {
338         uint64_t size64;
339         size64 = (r + 1)->size;
340         size64 <<= 32;
341         size64 += r->size;
342         return size64;
343     }
344     return r->size;
345 }
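/*
 * A 64-bit BAR occupies two consecutive BAR slots: the lower slot carries the
 * low 32 bits of the size and the next slot carries the high 32 bits.  For
 * example, r->size = 0 and (r + 1)->size = 0x2 describe an 8GB region
 * (0x200000000 bytes).
 */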
346 
347 static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
348                                          int index)
349 {
350     PCIDevice *d = &s->dev;
351     XenPTRegion *region = NULL;
352     PCIIORegion *r;
353 
354     /* check 64bit BAR */
355     if ((0 < index) && (index < PCI_ROM_SLOT)) {
356         int type = s->real_device.io_regions[index - 1].type;
357 
358         if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
359             && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
360             region = &s->bases[index - 1];
361             if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
362                 return XEN_PT_BAR_FLAG_UPPER;
363             }
364         }
365     }
366 
367     /* check unused BAR */
368     r = &d->io_regions[index];
369     if (!xen_pt_get_bar_size(r)) {
370         return XEN_PT_BAR_FLAG_UNUSED;
371     }
372 
373     /* for ExpROM BAR */
374     if (index == PCI_ROM_SLOT) {
375         return XEN_PT_BAR_FLAG_MEM;
376     }
377 
378     /* check BAR I/O indicator */
379     if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
380         return XEN_PT_BAR_FLAG_IO;
381     } else {
382         return XEN_PT_BAR_FLAG_MEM;
383     }
384 }
385 
386 static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
387 {
388     if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
389         return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
390     } else {
391         return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
392     }
393 }
394 
395 static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
396                                uint32_t real_offset, uint32_t *data)
397 {
398     uint32_t reg_field = 0;
399     int index;
400 
401     index = xen_pt_bar_offset_to_index(reg->offset);
402     if (index < 0 || index >= PCI_NUM_REGIONS) {
403         XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
404         return -1;
405     }
406 
407     /* set BAR flag */
408     s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
409     if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
410         reg_field = XEN_PT_INVALID_REG;
411     }
412 
413     *data = reg_field;
414     return 0;
415 }
416 static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
417                                uint32_t *value, uint32_t valid_mask)
418 {
419     XenPTRegInfo *reg = cfg_entry->reg;
420     uint32_t valid_emu_mask = 0;
421     uint32_t bar_emu_mask = 0;
422     int index;
423 
424     /* get BAR index */
425     index = xen_pt_bar_offset_to_index(reg->offset);
426     if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
427         XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
428         return -1;
429     }
430 
431     /* use fixed-up value from kernel sysfs */
432     *value = base_address_with_flags(&s->real_device.io_regions[index]);
433 
434     /* set the emulation mask depending on the BAR flag */
435     switch (s->bases[index].bar_flag) {
436     case XEN_PT_BAR_FLAG_MEM:
437         bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
438         break;
439     case XEN_PT_BAR_FLAG_IO:
440         bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
441         break;
442     case XEN_PT_BAR_FLAG_UPPER:
443         bar_emu_mask = XEN_PT_BAR_ALLF;
444         break;
445     default:
446         break;
447     }
448 
449     /* emulate BAR */
450     valid_emu_mask = bar_emu_mask & valid_mask;
451     *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
452 
453     return 0;
454 }
455 static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
456                                 uint32_t *val, uint32_t dev_value,
457                                 uint32_t valid_mask)
458 {
459     XenPTRegInfo *reg = cfg_entry->reg;
460     XenPTRegion *base = NULL;
461     PCIDevice *d = &s->dev;
462     const PCIIORegion *r;
463     uint32_t writable_mask = 0;
464     uint32_t bar_emu_mask = 0;
465     uint32_t bar_ro_mask = 0;
466     uint32_t r_size = 0;
467     int index = 0;
468 
469     index = xen_pt_bar_offset_to_index(reg->offset);
470     if (index < 0 || index >= PCI_NUM_REGIONS) {
471         XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
472         return -1;
473     }
474 
475     r = &d->io_regions[index];
476     base = &s->bases[index];
477     r_size = xen_pt_get_emul_size(base->bar_flag, r->size);
478 
479     /* set the emulation mask and read-only mask depending on the BAR flag */
480     switch (s->bases[index].bar_flag) {
481     case XEN_PT_BAR_FLAG_MEM:
482         bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
483         if (!r_size) {
484             /* low 32 bits mask for 64 bit bars */
485             bar_ro_mask = XEN_PT_BAR_ALLF;
486         } else {
487             bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
488         }
489         break;
490     case XEN_PT_BAR_FLAG_IO:
491         bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
492         bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
493         break;
494     case XEN_PT_BAR_FLAG_UPPER:
495         bar_emu_mask = XEN_PT_BAR_ALLF;
496         bar_ro_mask = r_size ? r_size - 1 : 0;
497         break;
498     default:
499         break;
500     }
501 
502     /* modify the emulated register */
503     writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
504     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
505 
506     /* check whether we need to update the virtual region address or not */
507     switch (s->bases[index].bar_flag) {
508     case XEN_PT_BAR_FLAG_UPPER:
509     case XEN_PT_BAR_FLAG_MEM:
510         /* nothing to do */
511         break;
512     case XEN_PT_BAR_FLAG_IO:
513         /* nothing to do */
514         break;
515     default:
516         break;
517     }
518 
519     /* create value for writing to I/O device register */
520     *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
521 
522     return 0;
523 }
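/*
 * The size-derived read-only bits are what make guest BAR sizing work.  For a
 * hypothetical 4KB memory BAR (r_size = 0x1000), bar_ro_mask becomes
 * 0x0000000F | 0x00000FFF = 0x00000FFF, so a guest write of 0xFFFFFFFF only
 * updates bits 31:12 of the emulated BAR; assuming the emulated low bits are
 * zero (an aligned BAR), the guest then reads back 0xFFFFF000 plus the low
 * type bits, just as it would from real hardware.
 */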
524 
525 /* write Exp ROM BAR */
526 static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
527                                         XenPTReg *cfg_entry, uint32_t *val,
528                                         uint32_t dev_value, uint32_t valid_mask)
529 {
530     XenPTRegInfo *reg = cfg_entry->reg;
531     XenPTRegion *base = NULL;
532     PCIDevice *d = (PCIDevice *)&s->dev;
533     uint32_t writable_mask = 0;
534     uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
535     pcibus_t r_size = 0;
536     uint32_t bar_ro_mask = 0;
537 
538     r_size = d->io_regions[PCI_ROM_SLOT].size;
539     base = &s->bases[PCI_ROM_SLOT];
540     /* align memory type resource size */
541     r_size = xen_pt_get_emul_size(base->bar_flag, r_size);
542 
543     /* set emulate mask and read-only mask */
544     bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;
545 
546     /* modify the emulated register */
547     writable_mask = ~bar_ro_mask & valid_mask;
548     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
549 
550     /* create value for writing to I/O device register */
551     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
552 
553     return 0;
554 }
555 
556 /* Header Type0 reg static information table */
557 static XenPTRegInfo xen_pt_emu_reg_header0[] = {
558     /* Vendor ID reg */
559     {
560         .offset     = PCI_VENDOR_ID,
561         .size       = 2,
562         .init_val   = 0x0000,
563         .ro_mask    = 0xFFFF,
564         .emu_mask   = 0xFFFF,
565         .init       = xen_pt_vendor_reg_init,
566         .u.w.read   = xen_pt_word_reg_read,
567         .u.w.write  = xen_pt_word_reg_write,
568     },
569     /* Device ID reg */
570     {
571         .offset     = PCI_DEVICE_ID,
572         .size       = 2,
573         .init_val   = 0x0000,
574         .ro_mask    = 0xFFFF,
575         .emu_mask   = 0xFFFF,
576         .init       = xen_pt_device_reg_init,
577         .u.w.read   = xen_pt_word_reg_read,
578         .u.w.write  = xen_pt_word_reg_write,
579     },
580     /* Command reg */
581     {
582         .offset     = PCI_COMMAND,
583         .size       = 2,
584         .init_val   = 0x0000,
585         .res_mask   = 0xF880,
586         .emu_mask   = 0x0743,
587         .init       = xen_pt_common_reg_init,
588         .u.w.read   = xen_pt_word_reg_read,
589         .u.w.write  = xen_pt_cmd_reg_write,
590     },
591     /* Capabilities Pointer reg */
592     {
593         .offset     = PCI_CAPABILITY_LIST,
594         .size       = 1,
595         .init_val   = 0x00,
596         .ro_mask    = 0xFF,
597         .emu_mask   = 0xFF,
598         .init       = xen_pt_ptr_reg_init,
599         .u.b.read   = xen_pt_byte_reg_read,
600         .u.b.write  = xen_pt_byte_reg_write,
601     },
602     /* Status reg */
603     /* use emulated Cap Ptr value to initialize,
604      * so it needs to be declared after the Cap Ptr reg
605      */
606     {
607         .offset     = PCI_STATUS,
608         .size       = 2,
609         .init_val   = 0x0000,
610         .res_mask   = 0x0007,
611         .ro_mask    = 0x06F8,
612         .emu_mask   = 0x0010,
613         .init       = xen_pt_status_reg_init,
614         .u.w.read   = xen_pt_word_reg_read,
615         .u.w.write  = xen_pt_word_reg_write,
616     },
617     /* Cache Line Size reg */
618     {
619         .offset     = PCI_CACHE_LINE_SIZE,
620         .size       = 1,
621         .init_val   = 0x00,
622         .ro_mask    = 0x00,
623         .emu_mask   = 0xFF,
624         .init       = xen_pt_common_reg_init,
625         .u.b.read   = xen_pt_byte_reg_read,
626         .u.b.write  = xen_pt_byte_reg_write,
627     },
628     /* Latency Timer reg */
629     {
630         .offset     = PCI_LATENCY_TIMER,
631         .size       = 1,
632         .init_val   = 0x00,
633         .ro_mask    = 0x00,
634         .emu_mask   = 0xFF,
635         .init       = xen_pt_common_reg_init,
636         .u.b.read   = xen_pt_byte_reg_read,
637         .u.b.write  = xen_pt_byte_reg_write,
638     },
639     /* Header Type reg */
640     {
641         .offset     = PCI_HEADER_TYPE,
642         .size       = 1,
643         .init_val   = 0x00,
644         .ro_mask    = 0xFF,
645         .emu_mask   = 0x00,
646         .init       = xen_pt_header_type_reg_init,
647         .u.b.read   = xen_pt_byte_reg_read,
648         .u.b.write  = xen_pt_byte_reg_write,
649     },
650     /* Interrupt Line reg */
651     {
652         .offset     = PCI_INTERRUPT_LINE,
653         .size       = 1,
654         .init_val   = 0x00,
655         .ro_mask    = 0x00,
656         .emu_mask   = 0xFF,
657         .init       = xen_pt_common_reg_init,
658         .u.b.read   = xen_pt_byte_reg_read,
659         .u.b.write  = xen_pt_byte_reg_write,
660     },
661     /* Interrupt Pin reg */
662     {
663         .offset     = PCI_INTERRUPT_PIN,
664         .size       = 1,
665         .init_val   = 0x00,
666         .ro_mask    = 0xFF,
667         .emu_mask   = 0xFF,
668         .init       = xen_pt_irqpin_reg_init,
669         .u.b.read   = xen_pt_byte_reg_read,
670         .u.b.write  = xen_pt_byte_reg_write,
671     },
672     /* BAR 0 reg */
673     /* the BAR masks need to be decided later, depending on the IO/MEM type */
674     {
675         .offset     = PCI_BASE_ADDRESS_0,
676         .size       = 4,
677         .init_val   = 0x00000000,
678         .init       = xen_pt_bar_reg_init,
679         .u.dw.read  = xen_pt_bar_reg_read,
680         .u.dw.write = xen_pt_bar_reg_write,
681     },
682     /* BAR 1 reg */
683     {
684         .offset     = PCI_BASE_ADDRESS_1,
685         .size       = 4,
686         .init_val   = 0x00000000,
687         .init       = xen_pt_bar_reg_init,
688         .u.dw.read  = xen_pt_bar_reg_read,
689         .u.dw.write = xen_pt_bar_reg_write,
690     },
691     /* BAR 2 reg */
692     {
693         .offset     = PCI_BASE_ADDRESS_2,
694         .size       = 4,
695         .init_val   = 0x00000000,
696         .init       = xen_pt_bar_reg_init,
697         .u.dw.read  = xen_pt_bar_reg_read,
698         .u.dw.write = xen_pt_bar_reg_write,
699     },
700     /* BAR 3 reg */
701     {
702         .offset     = PCI_BASE_ADDRESS_3,
703         .size       = 4,
704         .init_val   = 0x00000000,
705         .init       = xen_pt_bar_reg_init,
706         .u.dw.read  = xen_pt_bar_reg_read,
707         .u.dw.write = xen_pt_bar_reg_write,
708     },
709     /* BAR 4 reg */
710     {
711         .offset     = PCI_BASE_ADDRESS_4,
712         .size       = 4,
713         .init_val   = 0x00000000,
714         .init       = xen_pt_bar_reg_init,
715         .u.dw.read  = xen_pt_bar_reg_read,
716         .u.dw.write = xen_pt_bar_reg_write,
717     },
718     /* BAR 5 reg */
719     {
720         .offset     = PCI_BASE_ADDRESS_5,
721         .size       = 4,
722         .init_val   = 0x00000000,
723         .init       = xen_pt_bar_reg_init,
724         .u.dw.read  = xen_pt_bar_reg_read,
725         .u.dw.write = xen_pt_bar_reg_write,
726     },
727     /* Expansion ROM BAR reg */
728     {
729         .offset     = PCI_ROM_ADDRESS,
730         .size       = 4,
731         .init_val   = 0x00000000,
732         .ro_mask    = 0x000007FE,
733         .emu_mask   = 0xFFFFF800,
734         .init       = xen_pt_bar_reg_init,
735         .u.dw.read  = xen_pt_long_reg_read,
736         .u.dw.write = xen_pt_exp_rom_bar_reg_write,
737     },
738     {
739         .size = 0,
740     },
741 };
742 
743 
744 /*********************************
745  * Vital Product Data Capability
746  */
747 
748 /* Vital Product Data Capability Structure reg static information table */
749 static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
750     {
751         .offset     = PCI_CAP_LIST_NEXT,
752         .size       = 1,
753         .init_val   = 0x00,
754         .ro_mask    = 0xFF,
755         .emu_mask   = 0xFF,
756         .init       = xen_pt_ptr_reg_init,
757         .u.b.read   = xen_pt_byte_reg_read,
758         .u.b.write  = xen_pt_byte_reg_write,
759     },
760     {
761         .offset     = PCI_VPD_ADDR,
762         .size       = 2,
763         .ro_mask    = 0x0003,
764         .emu_mask   = 0x0003,
765         .init       = xen_pt_common_reg_init,
766         .u.w.read   = xen_pt_word_reg_read,
767         .u.w.write  = xen_pt_word_reg_write,
768     },
769     {
770         .size = 0,
771     },
772 };
773 
774 
775 /**************************************
776  * Vendor Specific Capability
777  */
778 
779 /* Vendor Specific Capability Structure reg static information table */
780 static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
781     {
782         .offset     = PCI_CAP_LIST_NEXT,
783         .size       = 1,
784         .init_val   = 0x00,
785         .ro_mask    = 0xFF,
786         .emu_mask   = 0xFF,
787         .init       = xen_pt_ptr_reg_init,
788         .u.b.read   = xen_pt_byte_reg_read,
789         .u.b.write  = xen_pt_byte_reg_write,
790     },
791     {
792         .size = 0,
793     },
794 };
795 
796 
797 /*****************************
798  * PCI Express Capability
799  */
800 
801 static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
802                                              uint32_t offset)
803 {
804     uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS);
805     return flags & PCI_EXP_FLAGS_VERS;
806 }
807 
808 static inline uint8_t get_device_type(XenPCIPassthroughState *s,
809                                       uint32_t offset)
810 {
811     uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS);
812     return (flags & PCI_EXP_FLAGS_TYPE) >> 4;
813 }
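/*
 * PCI_EXP_FLAGS is a 16-bit register, but both PCI_EXP_FLAGS_VERS (bits 3:0)
 * and PCI_EXP_FLAGS_TYPE (bits 7:4) live in its low byte, so the byte reads
 * in the two helpers above are sufficient.
 */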
814 
815 /* initialize Link Control register */
816 static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
817                                     XenPTRegInfo *reg, uint32_t real_offset,
818                                     uint32_t *data)
819 {
820     uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
821     uint8_t dev_type = get_device_type(s, real_offset - reg->offset);
822 
823     /* no need to initialize in case of Root Complex Integrated Endpoint
824      * with cap_ver 1.x
825      */
826     if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
827         *data = XEN_PT_INVALID_REG;
828     } else {
829         *data = reg->init_val;
830     }
831     return 0;
832 }
833 /* initialize Device Control 2 register */
834 static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
835                                     XenPTRegInfo *reg, uint32_t real_offset,
836                                     uint32_t *data)
837 {
838     uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
839 
840     /* no need to initialize in case of cap_ver 1.x */
841     if (cap_ver == 1) {
842         *data = XEN_PT_INVALID_REG;
843     } else {
844         *data = reg->init_val;
845     }
846     return 0;
847 }
848 /* initialize Link Control 2 register */
849 static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
850                                      XenPTRegInfo *reg, uint32_t real_offset,
851                                      uint32_t *data)
852 {
853     uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
854     uint32_t reg_field = 0;
855 
856     /* no need to initialize in case of cap_ver 1.x */
857     if (cap_ver == 1) {
858         reg_field = XEN_PT_INVALID_REG;
859     } else {
860         /* set Supported Link Speed */
861         uint8_t lnkcap = pci_get_byte(s->dev.config + real_offset - reg->offset
862                                       + PCI_EXP_LNKCAP);
863         reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
864     }
865 
866     *data = reg_field;
867     return 0;
868 }
869 
870 /* PCI Express Capability Structure reg static information table */
871 static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
872     /* Next Pointer reg */
873     {
874         .offset     = PCI_CAP_LIST_NEXT,
875         .size       = 1,
876         .init_val   = 0x00,
877         .ro_mask    = 0xFF,
878         .emu_mask   = 0xFF,
879         .init       = xen_pt_ptr_reg_init,
880         .u.b.read   = xen_pt_byte_reg_read,
881         .u.b.write  = xen_pt_byte_reg_write,
882     },
883     /* Device Capabilities reg */
884     {
885         .offset     = PCI_EXP_DEVCAP,
886         .size       = 4,
887         .init_val   = 0x00000000,
888         .ro_mask    = 0xFFFFFFFF,
889         .emu_mask   = 0x10000000,
890         .init       = xen_pt_common_reg_init,
891         .u.dw.read  = xen_pt_long_reg_read,
892         .u.dw.write = xen_pt_long_reg_write,
893     },
894     /* Device Control reg */
895     {
896         .offset     = PCI_EXP_DEVCTL,
897         .size       = 2,
898         .init_val   = 0x2810,
899         .ro_mask    = 0x8400,
900         .emu_mask   = 0xFFFF,
901         .init       = xen_pt_common_reg_init,
902         .u.w.read   = xen_pt_word_reg_read,
903         .u.w.write  = xen_pt_word_reg_write,
904     },
905     /* Device Status reg */
906     {
907         .offset     = PCI_EXP_DEVSTA,
908         .size       = 2,
909         .res_mask   = 0xFFC0,
910         .ro_mask    = 0x0030,
911         .init       = xen_pt_common_reg_init,
912         .u.w.read   = xen_pt_word_reg_read,
913         .u.w.write  = xen_pt_word_reg_write,
914     },
915     /* Link Control reg */
916     {
917         .offset     = PCI_EXP_LNKCTL,
918         .size       = 2,
919         .init_val   = 0x0000,
920         .ro_mask    = 0xFC34,
921         .emu_mask   = 0xFFFF,
922         .init       = xen_pt_linkctrl_reg_init,
923         .u.w.read   = xen_pt_word_reg_read,
924         .u.w.write  = xen_pt_word_reg_write,
925     },
926     /* Link Status reg */
927     {
928         .offset     = PCI_EXP_LNKSTA,
929         .size       = 2,
930         .ro_mask    = 0x3FFF,
931         .init       = xen_pt_common_reg_init,
932         .u.w.read   = xen_pt_word_reg_read,
933         .u.w.write  = xen_pt_word_reg_write,
934     },
935     /* Device Control 2 reg */
936     {
937         .offset     = 0x28,
938         .size       = 2,
939         .init_val   = 0x0000,
940         .ro_mask    = 0xFFE0,
941         .emu_mask   = 0xFFFF,
942         .init       = xen_pt_devctrl2_reg_init,
943         .u.w.read   = xen_pt_word_reg_read,
944         .u.w.write  = xen_pt_word_reg_write,
945     },
946     /* Link Control 2 reg */
947     {
948         .offset     = 0x30,
949         .size       = 2,
950         .init_val   = 0x0000,
951         .ro_mask    = 0xE040,
952         .emu_mask   = 0xFFFF,
953         .init       = xen_pt_linkctrl2_reg_init,
954         .u.w.read   = xen_pt_word_reg_read,
955         .u.w.write  = xen_pt_word_reg_write,
956     },
957     {
958         .size = 0,
959     },
960 };
961 
962 
963 /*********************************
964  * Power Management Capability
965  */
966 
967 /* write Power Management Control/Status register */
968 static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s,
969                                   XenPTReg *cfg_entry, uint16_t *val,
970                                   uint16_t dev_value, uint16_t valid_mask)
971 {
972     XenPTRegInfo *reg = cfg_entry->reg;
973     uint16_t writable_mask = 0;
974     uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
975 
976     /* modify the emulated register */
977     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
978     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
979 
980     /* create value for writing to I/O device register */
981     *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~PCI_PM_CTRL_PME_STATUS,
982                               throughable_mask);
983 
984     return 0;
985 }
986 
987 /* Power Management Capability reg static information table */
988 static XenPTRegInfo xen_pt_emu_reg_pm[] = {
989     /* Next Pointer reg */
990     {
991         .offset     = PCI_CAP_LIST_NEXT,
992         .size       = 1,
993         .init_val   = 0x00,
994         .ro_mask    = 0xFF,
995         .emu_mask   = 0xFF,
996         .init       = xen_pt_ptr_reg_init,
997         .u.b.read   = xen_pt_byte_reg_read,
998         .u.b.write  = xen_pt_byte_reg_write,
999     },
1000     /* Power Management Capabilities reg */
1001     {
1002         .offset     = PCI_CAP_FLAGS,
1003         .size       = 2,
1004         .init_val   = 0x0000,
1005         .ro_mask    = 0xFFFF,
1006         .emu_mask   = 0xF9C8,
1007         .init       = xen_pt_common_reg_init,
1008         .u.w.read   = xen_pt_word_reg_read,
1009         .u.w.write  = xen_pt_word_reg_write,
1010     },
1011     /* PCI Power Management Control/Status reg */
1012     {
1013         .offset     = PCI_PM_CTRL,
1014         .size       = 2,
1015         .init_val   = 0x0008,
1016         .res_mask   = 0x00F0,
1017         .ro_mask    = 0xE10C,
1018         .emu_mask   = 0x810B,
1019         .init       = xen_pt_common_reg_init,
1020         .u.w.read   = xen_pt_word_reg_read,
1021         .u.w.write  = xen_pt_pmcsr_reg_write,
1022     },
1023     {
1024         .size = 0,
1025     },
1026 };
1027 
1028 
1029 /********************************
1030  * MSI Capability
1031  */
1032 
1033 /* Helper */
1034 #define xen_pt_msi_check_type(offset, flags, what) \
1035         ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
1036                       PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
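/*
 * For example, when the capability advertises 64-bit addressing
 * (PCI_MSI_FLAGS_64BIT set), xen_pt_msi_check_type(offset, flags, DATA) is
 * true only for PCI_MSI_DATA_64 (offset 0x0C); otherwise it is true only for
 * PCI_MSI_DATA_32 (offset 0x08).
 */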
1037 
1038 /* Message Control register */
1039 static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
1040                                    XenPTRegInfo *reg, uint32_t real_offset,
1041                                    uint32_t *data)
1042 {
1043     PCIDevice *d = &s->dev;
1044     XenPTMSI *msi = s->msi;
1045     uint16_t reg_field = 0;
1046 
1047     /* use I/O device register's value as initial value */
1048     reg_field = pci_get_word(d->config + real_offset);
1049 
1050     if (reg_field & PCI_MSI_FLAGS_ENABLE) {
1051         XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
1052         xen_host_pci_set_word(&s->real_device, real_offset,
1053                               reg_field & ~PCI_MSI_FLAGS_ENABLE);
1054     }
1055     msi->flags |= reg_field;
1056     msi->ctrl_offset = real_offset;
1057     msi->initialized = false;
1058     msi->mapped = false;
1059 
1060     *data = reg->init_val;
1061     return 0;
1062 }
1063 static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
1064                                     XenPTReg *cfg_entry, uint16_t *val,
1065                                     uint16_t dev_value, uint16_t valid_mask)
1066 {
1067     XenPTRegInfo *reg = cfg_entry->reg;
1068     XenPTMSI *msi = s->msi;
1069     uint16_t writable_mask = 0;
1070     uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
1071 
1072     /* Currently no support for multi-vector */
1073     if (*val & PCI_MSI_FLAGS_QSIZE) {
1074         XEN_PT_WARN(&s->dev, "Guest tried to set more than 1 vector, ctrl %x\n", *val);
1075     }
1076 
1077     /* modify the emulated register */
1078     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
1079     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
1080     msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE;
1081 
1082     /* create value for writing to I/O device register */
1083     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
1084 
1085     /* update MSI */
1086     if (*val & PCI_MSI_FLAGS_ENABLE) {
1087         /* setup MSI pirq for the first time */
1088         if (!msi->initialized) {
1089             /* Init physical one */
1090             XEN_PT_LOG(&s->dev, "setup MSI\n");
1091             if (xen_pt_msi_setup(s)) {
1092                 /* We do not broadcast the error to the framework code, so
1093                  * that MSI errors are contained in MSI emulation code and
1094                  * QEMU can go on running.
1095                  * The guest's MSI will simply not work.
1096                  */
1097                 *val &= ~PCI_MSI_FLAGS_ENABLE;
1098                 XEN_PT_WARN(&s->dev, "Can not map MSI.\n");
1099                 return 0;
1100             }
1101             if (xen_pt_msi_update(s)) {
1102                 *val &= ~PCI_MSI_FLAGS_ENABLE;
1103                 XEN_PT_WARN(&s->dev, "Can not bind MSI\n");
1104                 return 0;
1105             }
1106             msi->initialized = true;
1107             msi->mapped = true;
1108         }
1109         msi->flags |= PCI_MSI_FLAGS_ENABLE;
1110     } else if (msi->mapped) {
1111         xen_pt_msi_disable(s);
1112     }
1113 
1114     return 0;
1115 }
1116 
1117 /* initialize Message Upper Address register */
1118 static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
1119                                      XenPTRegInfo *reg, uint32_t real_offset,
1120                                      uint32_t *data)
1121 {
1122     /* no need to initialize in case of 32 bit type */
1123     if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
1124         *data = XEN_PT_INVALID_REG;
1125     } else {
1126         *data = reg->init_val;
1127     }
1128 
1129     return 0;
1130 }
1131 /* this function will be called twice (for 32 bit and 64 bit type) */
1132 /* initialize Message Data register */
1133 static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
1134                                    XenPTRegInfo *reg, uint32_t real_offset,
1135                                    uint32_t *data)
1136 {
1137     uint32_t flags = s->msi->flags;
1138     uint32_t offset = reg->offset;
1139 
1140     /* check whether the offset matches the type or not */
1141     if (xen_pt_msi_check_type(offset, flags, DATA)) {
1142         *data = reg->init_val;
1143     } else {
1144         *data = XEN_PT_INVALID_REG;
1145     }
1146     return 0;
1147 }
1148 
1149 /* this function will be called twice (for 32 bit and 64 bit type) */
1150 /* initialize Mask register */
1151 static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
1152                                 XenPTRegInfo *reg, uint32_t real_offset,
1153                                 uint32_t *data)
1154 {
1155     uint32_t flags = s->msi->flags;
1156 
1157     /* check whether the offset matches the type or not */
1158     if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
1159         *data = XEN_PT_INVALID_REG;
1160     } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
1161         *data = reg->init_val;
1162     } else {
1163         *data = XEN_PT_INVALID_REG;
1164     }
1165     return 0;
1166 }
1167 
1168 /* this function will be called twice (for 32 bit and 64 bit type) */
1169 /* initialize Pending register */
1170 static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
1171                                    XenPTRegInfo *reg, uint32_t real_offset,
1172                                    uint32_t *data)
1173 {
1174     uint32_t flags = s->msi->flags;
1175 
1176     /* check whether the offset matches the type or not */
1177     if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
1178         *data = XEN_PT_INVALID_REG;
1179     } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
1180         *data = reg->init_val;
1181     } else {
1182         *data = XEN_PT_INVALID_REG;
1183     }
1184     return 0;
1185 }
1186 
1187 /* write Message Address register */
1188 static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
1189                                       XenPTReg *cfg_entry, uint32_t *val,
1190                                       uint32_t dev_value, uint32_t valid_mask)
1191 {
1192     XenPTRegInfo *reg = cfg_entry->reg;
1193     uint32_t writable_mask = 0;
1194     uint32_t old_addr = cfg_entry->data;
1195 
1196     /* modify the emulated register */
1197     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
1198     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
1199     s->msi->addr_lo = cfg_entry->data;
1200 
1201     /* create value for writing to I/O device register */
1202     *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
1203 
1204     /* update MSI */
1205     if (cfg_entry->data != old_addr) {
1206         if (s->msi->mapped) {
1207             xen_pt_msi_update(s);
1208         }
1209     }
1210 
1211     return 0;
1212 }
1213 /* write Message Upper Address register */
1214 static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
1215                                       XenPTReg *cfg_entry, uint32_t *val,
1216                                       uint32_t dev_value, uint32_t valid_mask)
1217 {
1218     XenPTRegInfo *reg = cfg_entry->reg;
1219     uint32_t writable_mask = 0;
1220     uint32_t old_addr = cfg_entry->data;
1221 
1222     /* check whether the type is 64 bit or not */
1223     if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
1224         XEN_PT_ERR(&s->dev,
1225                    "Can't write to the upper address without 64 bit support\n");
1226         return -1;
1227     }
1228 
1229     /* modify the emulated register */
1230     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
1231     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
1232     /* update the msi_info too */
1233     s->msi->addr_hi = cfg_entry->data;
1234 
1235     /* create value for writing to I/O device register */
1236     *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
1237 
1238     /* update MSI */
1239     if (cfg_entry->data != old_addr) {
1240         if (s->msi->mapped) {
1241             xen_pt_msi_update(s);
1242         }
1243     }
1244 
1245     return 0;
1246 }
1247 
1248 
1249 /* this function will be called twice (for 32 bit and 64 bit type) */
1250 /* write Message Data register */
1251 static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
1252                                     XenPTReg *cfg_entry, uint16_t *val,
1253                                     uint16_t dev_value, uint16_t valid_mask)
1254 {
1255     XenPTRegInfo *reg = cfg_entry->reg;
1256     XenPTMSI *msi = s->msi;
1257     uint16_t writable_mask = 0;
1258     uint16_t old_data = cfg_entry->data;
1259     uint32_t offset = reg->offset;
1260 
1261     /* check whether the offset matches the type or not */
1262     if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
1263         /* exit I/O emulator */
1264         XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
1265         return -1;
1266     }
1267 
1268     /* modify the emulated register */
1269     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
1270     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
1271     /* update the msi_info too */
1272     msi->data = cfg_entry->data;
1273 
1274     /* create value for writing to I/O device register */
1275     *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
1276 
1277     /* update MSI */
1278     if (cfg_entry->data != old_data) {
1279         if (msi->mapped) {
1280             xen_pt_msi_update(s);
1281         }
1282     }
1283 
1284     return 0;
1285 }
1286 
1287 /* MSI Capability Structure reg static information table */
1288 static XenPTRegInfo xen_pt_emu_reg_msi[] = {
1289     /* Next Pointer reg */
1290     {
1291         .offset     = PCI_CAP_LIST_NEXT,
1292         .size       = 1,
1293         .init_val   = 0x00,
1294         .ro_mask    = 0xFF,
1295         .emu_mask   = 0xFF,
1296         .init       = xen_pt_ptr_reg_init,
1297         .u.b.read   = xen_pt_byte_reg_read,
1298         .u.b.write  = xen_pt_byte_reg_write,
1299     },
1300     /* Message Control reg */
1301     {
1302         .offset     = PCI_MSI_FLAGS,
1303         .size       = 2,
1304         .init_val   = 0x0000,
1305         .res_mask   = 0xFE00,
1306         .ro_mask    = 0x018E,
1307         .emu_mask   = 0x017E,
1308         .init       = xen_pt_msgctrl_reg_init,
1309         .u.w.read   = xen_pt_word_reg_read,
1310         .u.w.write  = xen_pt_msgctrl_reg_write,
1311     },
1312     /* Message Address reg */
1313     {
1314         .offset     = PCI_MSI_ADDRESS_LO,
1315         .size       = 4,
1316         .init_val   = 0x00000000,
1317         .ro_mask    = 0x00000003,
1318         .emu_mask   = 0xFFFFFFFF,
1319         .init       = xen_pt_common_reg_init,
1320         .u.dw.read  = xen_pt_long_reg_read,
1321         .u.dw.write = xen_pt_msgaddr32_reg_write,
1322     },
1323     /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
1324     {
1325         .offset     = PCI_MSI_ADDRESS_HI,
1326         .size       = 4,
1327         .init_val   = 0x00000000,
1328         .ro_mask    = 0x00000000,
1329         .emu_mask   = 0xFFFFFFFF,
1330         .init       = xen_pt_msgaddr64_reg_init,
1331         .u.dw.read  = xen_pt_long_reg_read,
1332         .u.dw.write = xen_pt_msgaddr64_reg_write,
1333     },
1334     /* Message Data reg (16 bits of data for 32-bit devices) */
1335     {
1336         .offset     = PCI_MSI_DATA_32,
1337         .size       = 2,
1338         .init_val   = 0x0000,
1339         .ro_mask    = 0x0000,
1340         .emu_mask   = 0xFFFF,
1341         .init       = xen_pt_msgdata_reg_init,
1342         .u.w.read   = xen_pt_word_reg_read,
1343         .u.w.write  = xen_pt_msgdata_reg_write,
1344     },
1345     /* Message Data reg (16 bits of data for 64-bit devices) */
1346     {
1347         .offset     = PCI_MSI_DATA_64,
1348         .size       = 2,
1349         .init_val   = 0x0000,
1350         .ro_mask    = 0x0000,
1351         .emu_mask   = 0xFFFF,
1352         .init       = xen_pt_msgdata_reg_init,
1353         .u.w.read   = xen_pt_word_reg_read,
1354         .u.w.write  = xen_pt_msgdata_reg_write,
1355     },
1356     /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
1357     {
1358         .offset     = PCI_MSI_MASK_32,
1359         .size       = 4,
1360         .init_val   = 0x00000000,
1361         .ro_mask    = 0xFFFFFFFF,
1362         .emu_mask   = 0xFFFFFFFF,
1363         .init       = xen_pt_mask_reg_init,
1364         .u.dw.read  = xen_pt_long_reg_read,
1365         .u.dw.write = xen_pt_long_reg_write,
1366     },
1367     /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
1368     {
1369         .offset     = PCI_MSI_MASK_64,
1370         .size       = 4,
1371         .init_val   = 0x00000000,
1372         .ro_mask    = 0xFFFFFFFF,
1373         .emu_mask   = 0xFFFFFFFF,
1374         .init       = xen_pt_mask_reg_init,
1375         .u.dw.read  = xen_pt_long_reg_read,
1376         .u.dw.write = xen_pt_long_reg_write,
1377     },
1378     /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
1379     {
1380         .offset     = PCI_MSI_MASK_32 + 4,
1381         .size       = 4,
1382         .init_val   = 0x00000000,
1383         .ro_mask    = 0xFFFFFFFF,
1384         .emu_mask   = 0x00000000,
1385         .init       = xen_pt_pending_reg_init,
1386         .u.dw.read  = xen_pt_long_reg_read,
1387         .u.dw.write = xen_pt_long_reg_write,
1388     },
1389     /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
1390     {
1391         .offset     = PCI_MSI_MASK_64 + 4,
1392         .size       = 4,
1393         .init_val   = 0x00000000,
1394         .ro_mask    = 0xFFFFFFFF,
1395         .emu_mask   = 0x00000000,
1396         .init       = xen_pt_pending_reg_init,
1397         .u.dw.read  = xen_pt_long_reg_read,
1398         .u.dw.write = xen_pt_long_reg_write,
1399     },
1400     {
1401         .size = 0,
1402     },
1403 };
1404 
1405 
1406 /**************************************
1407  * MSI-X Capability
1408  */
1409 
1410 /* Message Control register for MSI-X */
1411 static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
1412                                     XenPTRegInfo *reg, uint32_t real_offset,
1413                                     uint32_t *data)
1414 {
1415     PCIDevice *d = &s->dev;
1416     uint16_t reg_field = 0;
1417 
1418     /* use I/O device register's value as initial value */
1419     reg_field = pci_get_word(d->config + real_offset);
1420 
1421     if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
1422         XEN_PT_LOG(d, "MSIX already enabled, disabling it first\n");
1423         xen_host_pci_set_word(&s->real_device, real_offset,
1424                               reg_field & ~PCI_MSIX_FLAGS_ENABLE);
1425     }
1426 
1427     s->msix->ctrl_offset = real_offset;
1428 
1429     *data = reg->init_val;
1430     return 0;
1431 }
1432 static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
1433                                      XenPTReg *cfg_entry, uint16_t *val,
1434                                      uint16_t dev_value, uint16_t valid_mask)
1435 {
1436     XenPTRegInfo *reg = cfg_entry->reg;
1437     uint16_t writable_mask = 0;
1438     uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
1439     int debug_msix_enabled_old;
1440 
1441     /* modify the emulated register */
1442     writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
1443     cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
1444 
1445     /* create value for writing to I/O device register */
1446     *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
1447 
1448     /* update MSI-X */
1449     if ((*val & PCI_MSIX_FLAGS_ENABLE)
1450         && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
1451         xen_pt_msix_update(s);
1452     } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
1453         xen_pt_msix_disable(s);
1454     }
1455 
1456     debug_msix_enabled_old = s->msix->enabled;
1457     s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
1458     if (s->msix->enabled != debug_msix_enabled_old) {
1459         XEN_PT_LOG(&s->dev, "%s MSI-X\n",
1460                    s->msix->enabled ? "enable" : "disable");
1461     }
1462 
1463     return 0;
1464 }
1465 
1466 /* MSI-X Capability Structure reg static information table */
1467 static XenPTRegInfo xen_pt_emu_reg_msix[] = {
1468     /* Next Pointer reg */
1469     {
1470         .offset     = PCI_CAP_LIST_NEXT,
1471         .size       = 1,
1472         .init_val   = 0x00,
1473         .ro_mask    = 0xFF,
1474         .emu_mask   = 0xFF,
1475         .init       = xen_pt_ptr_reg_init,
1476         .u.b.read   = xen_pt_byte_reg_read,
1477         .u.b.write  = xen_pt_byte_reg_write,
1478     },
1479     /* Message Control reg */
1480     {
1481         .offset     = PCI_MSI_FLAGS,
1482         .size       = 2,
1483         .init_val   = 0x0000,
1484         .res_mask   = 0x3800,
1485         .ro_mask    = 0x07FF,
1486         .emu_mask   = 0x0000,
1487         .init       = xen_pt_msixctrl_reg_init,
1488         .u.w.read   = xen_pt_word_reg_read,
1489         .u.w.write  = xen_pt_msixctrl_reg_write,
1490     },
1491     {
1492         .size = 0,
1493     },
1494 };
1495 
1496 
1497 /****************************
1498  * Capabilities
1499  */
1500 
1501 /* capability structure register group size functions */
1502 
1503 static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
1504                                     const XenPTRegGroupInfo *grp_reg,
1505                                     uint32_t base_offset, uint8_t *size)
1506 {
1507     *size = grp_reg->grp_size;
1508     return 0;
1509 }
1510 /* get Vendor Specific Capability Structure register group size */
1511 static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
1512                                    const XenPTRegGroupInfo *grp_reg,
1513                                    uint32_t base_offset, uint8_t *size)
1514 {
1515     *size = pci_get_byte(s->dev.config + base_offset + 0x02);
1516     return 0;
1517 }
1518 /* get PCI Express Capability Structure register group size */
1519 static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
1520                                  const XenPTRegGroupInfo *grp_reg,
1521                                  uint32_t base_offset, uint8_t *size)
1522 {
1523     PCIDevice *d = &s->dev;
1524     uint8_t version = get_capability_version(s, base_offset);
1525     uint8_t type = get_device_type(s, base_offset);
1526     uint8_t pcie_size = 0;
1527 
1528 
1529     /* calculate size depending on capability version and device/port type */
1530     /* in case of PCI Express Base Specification Rev 1.x */
1531     if (version == 1) {
1532         /* The PCI Express Capabilities, Device Capabilities, and Device
1533          * Status/Control registers are required for all PCI Express devices.
1534          * The Link Capabilities and Link Status/Control are required for all
1535          * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
1536          * are not required to implement registers other than those listed
1537          * above and may terminate the capability structure after them.
1538          */
1539         switch (type) {
1540         case PCI_EXP_TYPE_ENDPOINT:
1541         case PCI_EXP_TYPE_LEG_END:
1542             pcie_size = 0x14;
1543             break;
1544         case PCI_EXP_TYPE_RC_END:
1545             /* has no link */
1546             pcie_size = 0x0C;
1547             break;
1548             /* only EndPoint passthrough is supported */
1549         case PCI_EXP_TYPE_ROOT_PORT:
1550         case PCI_EXP_TYPE_UPSTREAM:
1551         case PCI_EXP_TYPE_DOWNSTREAM:
1552         case PCI_EXP_TYPE_PCI_BRIDGE:
1553         case PCI_EXP_TYPE_PCIE_BRIDGE:
1554         case PCI_EXP_TYPE_RC_EC:
1555         default:
1556             XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
1557             return -1;
1558         }
1559     }
1560     /* in case of PCI Express Base Specification Rev 2.0 */
1561     else if (version == 2) {
1562         switch (type) {
1563         case PCI_EXP_TYPE_ENDPOINT:
1564         case PCI_EXP_TYPE_LEG_END:
1565         case PCI_EXP_TYPE_RC_END:
1566             /* For Functions that do not implement the registers,
1567              * these spaces must be hardwired to 0b.
1568              */
1569             pcie_size = 0x3C;
1570             break;
1571             /* only EndPoint passthrough is supported */
1572         case PCI_EXP_TYPE_ROOT_PORT:
1573         case PCI_EXP_TYPE_UPSTREAM:
1574         case PCI_EXP_TYPE_DOWNSTREAM:
1575         case PCI_EXP_TYPE_PCI_BRIDGE:
1576         case PCI_EXP_TYPE_PCIE_BRIDGE:
1577         case PCI_EXP_TYPE_RC_EC:
1578         default:
1579             XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
1580             return -1;
1581         }
1582     } else {
1583         XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
1584         return -1;
1585     }
1586 
1587     *size = pcie_size;
1588     return 0;
1589 }
1590 /* get MSI Capability Structure register group size */
1591 static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
1592                                 const XenPTRegGroupInfo *grp_reg,
1593                                 uint32_t base_offset, uint8_t *size)
1594 {
1595     PCIDevice *d = &s->dev;
1596     uint16_t msg_ctrl = 0;
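    /* the base MSI capability (ID/next pointer, Message Control, Message
     * Address and Message Data) occupies 0xa bytes
     */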
1597     uint8_t msi_size = 0xa;
1598 
1599     msg_ctrl = pci_get_word(d->config + (base_offset + PCI_MSI_FLAGS));
1600 
1601     /* check for 64-bit addressing and per-vector masking capabilities */
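    /* a 64-bit Message Address adds an Upper Address dword (+4 bytes);
     * per-vector masking adds dword-aligned Mask and Pending registers,
     * growing the structure by a further 10 bytes
     */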
1602     if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
1603         msi_size += 4;
1604     }
1605     if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
1606         msi_size += 10;
1607     }
1608 
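    /* the MSI bookkeeping state is allocated here as well, since this size
     * hook runs exactly once and only when the device exposes an MSI
     * capability
     */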
1609     s->msi = g_new0(XenPTMSI, 1);
1610     s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
1611 
1612     *size = msi_size;
1613     return 0;
1614 }
1615 /* get MSI-X Capability Structure register group size */
1616 static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
1617                                  const XenPTRegGroupInfo *grp_reg,
1618                                  uint32_t base_offset, uint8_t *size)
1619 {
1620     int rc = 0;
1621 
1622     rc = xen_pt_msix_init(s, base_offset);
1623 
1624     if (rc < 0) {
1625         XEN_PT_ERR(&s->dev, "Internal error: xen_pt_msix_init failed.\n");
1626         return rc;
1627     }
1628 
1629     *size = grp_reg->grp_size;
1630     return 0;
1631 }
1632 
1633 
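/*
 * Table of the config-space register groups that are emulated or hidden.
 * Groups of type XEN_PT_GRP_TYPE_HARDWIRED are not passed through but read
 * back as zero, and a grp_size of 0xFF is a placeholder meaning the real
 * size is determined at run time by the group's size_init callback.
 */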
1634 static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
1635     /* Header Type0 reg group */
1636     {
1637         .grp_id      = 0xFF,
1638         .grp_type    = XEN_PT_GRP_TYPE_EMU,
1639         .grp_size    = 0x40,
1640         .size_init   = xen_pt_reg_grp_size_init,
1641         .emu_regs = xen_pt_emu_reg_header0,
1642     },
1643     /* PCI PowerManagement Capability reg group */
1644     {
1645         .grp_id      = PCI_CAP_ID_PM,
1646         .grp_type    = XEN_PT_GRP_TYPE_EMU,
1647         .grp_size    = PCI_PM_SIZEOF,
1648         .size_init   = xen_pt_reg_grp_size_init,
1649         .emu_regs = xen_pt_emu_reg_pm,
1650     },
1651     /* AGP Capability Structure reg group */
1652     {
1653         .grp_id     = PCI_CAP_ID_AGP,
1654         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1655         .grp_size   = 0x30,
1656         .size_init  = xen_pt_reg_grp_size_init,
1657     },
1658     /* Vital Product Data Capability Structure reg group */
1659     {
1660         .grp_id      = PCI_CAP_ID_VPD,
1661         .grp_type    = XEN_PT_GRP_TYPE_EMU,
1662         .grp_size    = 0x08,
1663         .size_init   = xen_pt_reg_grp_size_init,
1664         .emu_regs = xen_pt_emu_reg_vpd,
1665     },
1666     /* Slot Identification reg group */
1667     {
1668         .grp_id     = PCI_CAP_ID_SLOTID,
1669         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1670         .grp_size   = 0x04,
1671         .size_init  = xen_pt_reg_grp_size_init,
1672     },
1673     /* MSI Capability Structure reg group */
1674     {
1675         .grp_id      = PCI_CAP_ID_MSI,
1676         .grp_type    = XEN_PT_GRP_TYPE_EMU,
1677         .grp_size    = 0xFF,
1678         .size_init   = xen_pt_msi_size_init,
1679         .emu_regs = xen_pt_emu_reg_msi,
1680     },
1681     /* PCI-X Capabilities List Item reg group */
1682     {
1683         .grp_id     = PCI_CAP_ID_PCIX,
1684         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1685         .grp_size   = 0x18,
1686         .size_init  = xen_pt_reg_grp_size_init,
1687     },
1688     /* Vendor Specific Capability Structure reg group */
1689     {
1690         .grp_id      = PCI_CAP_ID_VNDR,
1691         .grp_type    = XEN_PT_GRP_TYPE_EMU,
1692         .grp_size    = 0xFF,
1693         .size_init   = xen_pt_vendor_size_init,
1694         .emu_regs = xen_pt_emu_reg_vendor,
1695     },
1696     /* SHPC Capability List Item reg group */
1697     {
1698         .grp_id     = PCI_CAP_ID_SHPC,
1699         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1700         .grp_size   = 0x08,
1701         .size_init  = xen_pt_reg_grp_size_init,
1702     },
1703     /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
1704     {
1705         .grp_id     = PCI_CAP_ID_SSVID,
1706         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1707         .grp_size   = 0x08,
1708         .size_init  = xen_pt_reg_grp_size_init,
1709     },
1710     /* AGP 8x Capability Structure reg group */
1711     {
1712         .grp_id     = PCI_CAP_ID_AGP3,
1713         .grp_type   = XEN_PT_GRP_TYPE_HARDWIRED,
1714         .grp_size   = 0x30,
1715         .size_init  = xen_pt_reg_grp_size_init,
1716     },
1717     /* PCI Express Capability Structure reg group */
1718     {
1719         .grp_id      = PCI_CAP_ID_EXP,
1720         .grp_type    = XEN_PT_GRP_TYPE_EMU,
1721         .grp_size    = 0xFF,
1722         .size_init   = xen_pt_pcie_size_init,
1723         .emu_regs = xen_pt_emu_reg_pcie,
1724     },
1725     /* MSI-X Capability Structure reg group */
1726     {
1727         .grp_id      = PCI_CAP_ID_MSIX,
1728         .grp_type    = XEN_PT_GRP_TYPE_EMU,
1729         .grp_size    = 0x0C,
1730         .size_init   = xen_pt_msix_size_init,
1731         .emu_regs = xen_pt_emu_reg_msix,
1732     },
1733     {
1734         .grp_size = 0,
1735     },
1736 };
1737 
1738 /* initialize Capabilities Pointer or Next Pointer register */
1739 static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
1740                                XenPTRegInfo *reg, uint32_t real_offset,
1741                                uint32_t *data)
1742 {
1743     int i;
1744     uint8_t *config = s->dev.config;
1745     uint32_t reg_field = pci_get_byte(config + real_offset);
1746     uint8_t cap_id = 0;
1747 
1748     /* find capability offset */
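    /* Walk the real device's capability chain starting from the value read
     * above and return the offset of the first capability that is actually
     * emulated; hidden and hardwired capabilities are skipped so the guest
     * never sees them in its capability list.
     */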
1749     while (reg_field) {
1750         for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
1751             if (xen_pt_hide_dev_cap(&s->real_device,
1752                                     xen_pt_emu_reg_grps[i].grp_id)) {
1753                 continue;
1754             }
1755 
1756             cap_id = pci_get_byte(config + reg_field + PCI_CAP_LIST_ID);
1757             if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
1758                 if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
1759                     goto out;
1760                 }
1761                 /* skip a capability hardwired to 0 and look for the next one */
1762                 break;
1763             }
1764         }
1765 
1766         /* next capability */
1767         reg_field = pci_get_byte(config + reg_field + PCI_CAP_LIST_NEXT);
1768     }
1769 
1770 out:
1771     *data = reg_field;
1772     return 0;
1773 }
1774 
1775 
1776 /*************
1777  * Main
1778  */
1779 
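/* Return the config-space offset of capability @cap on the real device, or 0
 * if the device does not expose it. The walk is bounded by PCI_CAP_MAX so a
 * malformed (looping) capability list cannot stall us.
 */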
1780 static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
1781 {
1782     uint8_t id;
1783     unsigned max_cap = PCI_CAP_MAX;
1784     uint8_t pos = PCI_CAPABILITY_LIST;
1785     uint8_t status = 0;
1786 
1787     if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
1788         return 0;
1789     }
1790     if ((status & PCI_STATUS_CAP_LIST) == 0) {
1791         return 0;
1792     }
1793 
1794     while (max_cap--) {
1795         if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
1796             break;
1797         }
1798         if (pos < PCI_CONFIG_HEADER_SIZE) {
1799             break;
1800         }
1801 
1802         pos &= ~3;
1803         if (xen_host_pci_get_byte(&s->real_device,
1804                                   pos + PCI_CAP_LIST_ID, &id)) {
1805             break;
1806         }
1807 
1808         if (id == 0xff) {
1809             break;
1810         }
1811         if (id == cap) {
1812             return pos;
1813         }
1814 
1815         pos += PCI_CAP_LIST_NEXT;
1816     }
1817     return 0;
1818 }
1819 
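/* Allocate and initialize a single emulated register: run the register's
 * init hook (which may return XEN_PT_INVALID_REG to indicate the register
 * should not be emulated at all) and, if it is kept, add it to the group's
 * register list.
 */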
1820 static int xen_pt_config_reg_init(XenPCIPassthroughState *s,
1821                                   XenPTRegGroup *reg_grp, XenPTRegInfo *reg)
1822 {
1823     XenPTReg *reg_entry;
1824     uint32_t data = 0;
1825     int rc = 0;
1826 
1827     reg_entry = g_new0(XenPTReg, 1);
1828     reg_entry->reg = reg;
1829 
1830     if (reg->init) {
1831         /* initialize the emulated register */
1832         rc = reg->init(s, reg_entry->reg,
1833                        reg_grp->base_offset + reg->offset, &data);
1834         if (rc < 0) {
1835             g_free(reg_entry);
1836             return rc;
1837         }
1838         if (data == XEN_PT_INVALID_REG) {
1839             /* free the unused register entry (e.g. an unimplemented BAR) */
1840             g_free(reg_entry);
1841             return 0;
1842         }
1843         /* set register value */
1844         reg_entry->data = data;
1845     }
1846     /* add the register entry to the group's list */
1847     QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
1848 
1849     return 0;
1850 }
1851 
1852 int xen_pt_config_init(XenPCIPassthroughState *s)
1853 {
1854     int i, rc;
1855 
1856     QLIST_INIT(&s->reg_grps);
1857 
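    /* For each known register group: skip capabilities that must be hidden,
     * locate the capability in the real device's config space (the Header
     * Type0 group, grp_id 0xFF, always lives at offset 0), work out the
     * group's size, and then instantiate every emulated register it contains.
     */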
1858     for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
1859         uint32_t reg_grp_offset = 0;
1860         XenPTRegGroup *reg_grp_entry = NULL;
1861 
1862         if (xen_pt_emu_reg_grps[i].grp_id != 0xFF) {
1863             if (xen_pt_hide_dev_cap(&s->real_device,
1864                                     xen_pt_emu_reg_grps[i].grp_id)) {
1865                 continue;
1866             }
1867 
1868             reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);
1869 
1870             if (!reg_grp_offset) {
1871                 continue;
1872             }
1873         }
1874 
1875         reg_grp_entry = g_new0(XenPTRegGroup, 1);
1876         QLIST_INIT(&reg_grp_entry->reg_tbl_list);
1877         QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);
1878 
1879         reg_grp_entry->base_offset = reg_grp_offset;
1880         reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
1881         if (xen_pt_emu_reg_grps[i].size_init) {
1882             /* get register group size */
1883             rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
1884                                                   reg_grp_offset,
1885                                                   &reg_grp_entry->size);
1886             if (rc < 0) {
1887                 xen_pt_config_delete(s);
1888                 return rc;
1889             }
1890         }
1891 
1892         if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
1893             if (xen_pt_emu_reg_grps[i].emu_regs) {
1894                 int j = 0;
1895                 XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;
1896                 /* initialize each emulated register in the group */
1897                 for (j = 0; regs->size != 0; j++, regs++) {
1899                     rc = xen_pt_config_reg_init(s, reg_grp_entry, regs);
1900                     if (rc < 0) {
1901                         xen_pt_config_delete(s);
1902                         return rc;
1903                     }
1904                 }
1905             }
1906         }
1907     }
1908 
1909     return 0;
1910 }
1911 
1912 /* delete all emulated registers */
1913 void xen_pt_config_delete(XenPCIPassthroughState *s)
1914 {
1915     struct XenPTRegGroup *reg_group, *next_grp;
1916     struct XenPTReg *reg, *next_reg;
1917 
1918     /* free MSI/MSI-X info table */
1919     if (s->msix) {
1920         xen_pt_msix_delete(s);
1921     }
1922     if (s->msi) {
1923         g_free(s->msi);
1924     }
1925 
1926     /* free all register group entries */
1927     QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
1928         /* free all register entries */
1929         QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
1930             QLIST_REMOVE(reg, entries);
1931             g_free(reg);
1932         }
1933 
1934         QLIST_REMOVE(reg_group, entries);
1935         g_free(reg_group);
1936     }
1937 }
1938