Home
last modified time | relevance | path

Searched refs:cfg (Results 1 – 25 of 133) sorted by relevance

123456

/qemu/tests/qtest/
H A Dsifive-e-aon-watchdog-test.c196 cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, SCALE, i); in test_scaled_wdogs()
218 cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, SCALE, 0); in test_watchdog()
240 cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, IP0, 0); in test_watchdog()
260 cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, SCALE, 15); in test_scaled_watchdog()
282 cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, IP0, 0); in test_scaled_watchdog()
302 cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, SCALE, 0); in test_periodic_int()
303 cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, ZEROCMP, 1); in test_periodic_int()
321 cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, IP0, 0); in test_periodic_int()
340 cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, IP0, 0); in test_periodic_int()
360 cfg = FIELD_DP32(cfg, AON_WDT_WDOGCFG, SCALE, 15); in test_enable_disable()
[all …]
/qemu/target/microblaze/
H A Dcpu.c235 if (cpu->cfg.addr_size < 32 || cpu->cfg.addr_size > 64) { in mb_cpu_realizefn()
243 version = cpu->cfg.version ? cpu->cfg.version : DEFAULT_CPU_VERSION; in mb_cpu_realizefn()
255 cpu->cfg.pvr_regs[0] = in mb_cpu_realizefn()
270 cpu->cfg.pvr_regs[1] = cpu->cfg.pvr_user2; in mb_cpu_realizefn()
272 cpu->cfg.pvr_regs[2] = in mb_cpu_realizefn()
293 cpu->cfg.pvr_regs[5] |= in mb_cpu_realizefn()
296 cpu->cfg.pvr_regs[10] = in mb_cpu_realizefn()
300 cpu->cfg.pvr_regs[11] = ((cpu->cfg.use_mmu ? PVR11_USE_MMU : 0) | in mb_cpu_realizefn()
303 cpu->cfg.mmu = 3; in mb_cpu_realizefn()
305 cpu->cfg.mmu_zones = 16; in mb_cpu_realizefn()
[all …]
H A Dmmu.c120 if (tlb_zsel > cpu->cfg.mmu_zones) { in mmu_translate()
126 if (cpu->cfg.mmu == 1) { in mmu_translate()
163 lu->paddr = tlb_rpn & cpu->cfg.addr_mask; in mmu_translate()
185 if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) { in mmu_read()
198 if (!(cpu->cfg.mmu_tlb_access & 1)) { in mmu_read()
211 if (!(cpu->cfg.mmu_tlb_access & 1)) { in mmu_read()
242 if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) { in mmu_write()
268 if (cpu->cfg.mmu_tlb_access <= 1) { in mmu_write()
282 if (cpu->cfg.mmu_tlb_access <= 1) { in mmu_write()
302 if (cpu->cfg.mmu_tlb_access <= 1) { in mmu_write()
/qemu/util/
H A Dthrottle.c221 memset(cfg, 0, sizeof(*cfg)); in throttle_config_init()
223 cfg->buckets[i].burst_length = 1; in throttle_config_init()
231 throttle_config_init(&ts->cfg); in throttle_init()
303 if (cfg->buckets[i].avg > 0) { in throttle_enabled()
344 if (cfg->op_size && in throttle_is_valid()
402 ts->cfg = *cfg; in throttle_config()
406 ts->cfg.buckets[i].level = 0; in throttle_config()
420 *cfg = ts->cfg; in throttle_get_config()
486 if (ts->cfg.op_size && size > ts->cfg.op_size) { in throttle_account()
605 cfg->op_size = arg->iops_size; in throttle_limits_to_config()
[all …]
/qemu/tests/unit/
H A Dtest-throttle.c28 static ThrottleConfig cfg; variable
42 throttle_config_init(&cfg); in test_leak_bucket()
91 throttle_config_init(&cfg); in test_compute_wait()
177 g_assert(!ts.cfg.op_size); in test_init()
209 g_assert(!ts.cfg.op_size); in test_init_readonly()
241 g_assert(!ts.cfg.op_size); in test_init_writeonly()
340 cfg.buckets[index].avg = MAX(cfg.buckets[index].avg, 1); in set_cfg_value()
350 throttle_config_init(&cfg); in test_enabled()
374 throttle_config_init(&cfg); in test_conflicts_for_one_set()
558 cfg.op_size = 4096; in test_iops_size_is_missing_limit()
[all …]
/qemu/fsdev/
H A Dqemu-fsdev-throttle.c36 throttle_config_init(&fst->cfg); in fsdev_throttle_parse_opts()
37 fst->cfg.buckets[THROTTLE_BPS_TOTAL].avg = in fsdev_throttle_parse_opts()
39 fst->cfg.buckets[THROTTLE_BPS_READ].avg = in fsdev_throttle_parse_opts()
41 fst->cfg.buckets[THROTTLE_BPS_WRITE].avg = in fsdev_throttle_parse_opts()
43 fst->cfg.buckets[THROTTLE_OPS_TOTAL].avg = in fsdev_throttle_parse_opts()
45 fst->cfg.buckets[THROTTLE_OPS_READ].avg = in fsdev_throttle_parse_opts()
58 fst->cfg.buckets[THROTTLE_OPS_READ].max = in fsdev_throttle_parse_opts()
75 fst->cfg.op_size = in fsdev_throttle_parse_opts()
83 if (throttle_enabled(&fst->cfg)) { in fsdev_throttle_init()
102 if (throttle_enabled(&fst->cfg)) { in fsdev_co_throttle_request()
[all …]
/qemu/target/riscv/tcg/
H A Dtcg-cpu.c276 if (cfg->elen > 64 || cfg->elen < 8) { in riscv_cpu_validate_v()
537 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { in riscv_cpu_validate_set_extensions()
557 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { in riscv_cpu_validate_set_extensions()
610 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || in riscv_cpu_validate_set_extensions()
611 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { in riscv_cpu_validate_set_extensions()
617 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { in riscv_cpu_validate_set_extensions()
623 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) { in riscv_cpu_validate_set_extensions()
670 if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg || in riscv_cpu_validate_set_extensions()
671 cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || in riscv_cpu_validate_set_extensions()
678 if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) { in riscv_cpu_validate_set_extensions()
[all …]
/qemu/roms/
H A Dedk2-build.py44 def get_coredir(cfg): argument
49 def get_toolchain(cfg, build): argument
51 return cfg[build]['tool']
53 return cfg['global']['tool']
57 coredir = get_coredir(cfg)
180 b = cfg[build]
261 coredir = get_coredir(cfg)
319 def build_list(cfg): argument
320 for build in cfg.sections():
384 cfg.optionxform = str
[all …]
/qemu/target/riscv/
H A Dcpu.c213 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; in isa_ext_is_enabled()
220 bool *ext_enabled = (void *)&cpu->cfg + ext_offset; in isa_ext_update_enabled()
413 cpu->cfg.satp_mode.supported |= (1 << i); in set_satp_mode_max_supported()
428 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); in set_satp_mode_default_map()
432 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; in set_satp_mode_default_map()
451 cpu->cfg.ext_zifencei = true; in riscv_any_cpu_init()
452 cpu->cfg.ext_zicsr = true; in riscv_any_cpu_init()
453 cpu->cfg.mmu = true; in riscv_any_cpu_init()
454 cpu->cfg in riscv_any_cpu_init()
[all …]
H A Dcpu_cfg.h178 static inline bool always_true_p(const RISCVCPUConfig *cfg __attribute__((__unused__))) in always_true_p() argument
183 static inline bool has_xthead_p(const RISCVCPUConfig *cfg) in has_xthead_p() argument
185 return cfg->ext_xtheadba || cfg->ext_xtheadbb || in has_xthead_p()
186 cfg->ext_xtheadbs || cfg->ext_xtheadcmo || in has_xthead_p()
187 cfg->ext_xtheadcondmov || in has_xthead_p()
188 cfg->ext_xtheadfmemidx || cfg->ext_xtheadfmv || in has_xthead_p()
189 cfg->ext_xtheadmac || cfg->ext_xtheadmemidx || in has_xthead_p()
190 cfg->ext_xtheadmempair || cfg->ext_xtheadsync; in has_xthead_p()
194 static inline bool has_ ## ext ## _p(const RISCVCPUConfig *cfg) \
196 return cfg->ext_ ## ext ; \
/qemu/hw/arm/
H A Dsmmu-common.c83 key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, in smmu_iotlb_lookup()
94 trace_smmu_iotlb_lookup_hit(cfg->asid, cfg->s2cfg.vmid, iova, in smmu_iotlb_lookup()
95 cfg->iotlb_hits, cfg->iotlb_misses, in smmu_iotlb_lookup()
97 (cfg->iotlb_hits + cfg->iotlb_misses)); in smmu_iotlb_lookup()
100 trace_smmu_iotlb_lookup_miss(cfg->asid, cfg->s2cfg.vmid, iova, in smmu_iotlb_lookup()
101 cfg->iotlb_hits, cfg->iotlb_misses, in smmu_iotlb_lookup()
103 (cfg->iotlb_hits + cfg->iotlb_misses)); in smmu_iotlb_lookup()
117 *key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, new->entry.iova, in smmu_iotlb_insert()
119 trace_smmu_iotlb_insert(cfg->asid, cfg->s2cfg.vmid, new->entry.iova, in smmu_iotlb_insert()
267 bool tbi = extract64(iova, 55, 1) ? TBI1(cfg->tbi) : TBI0(cfg->tbi); in select_tt()
[all …]
H A Dsmmuv3.c37 #define PTW_RECORD_FAULT(cfg) (((cfg)->stage == 1) ? (cfg)->record_faults : \ argument
405 cfg->stage = 2; in decode_ste_s2_cfg()
444 if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) { in decode_ste_s2_cfg()
448 cfg->s2cfg.vttb, cfg->s2cfg.eff_ps); in decode_ste_s2_cfg()
460 if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz, in decode_ste_s2_cfg()
681 cfg->stage = 1; in decode_cd()
684 cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas); in decode_cd()
765 if (cfg->aborted || cfg->bypassed || (cfg->stage == 2)) { in smmuv3_decode_config()
796 if (cfg) { in smmuv3_get_config()
817 return cfg; in smmuv3_get_config()
[all …]
H A Dxen_arm.c61 } cfg; member
167 sysbus_mmio_map(busdev, 0, xam->cfg.tpm_base_addr); in xen_enable_tpm()
169 trace_xen_enable_tpm(xam->cfg.tpm_base_addr); in xen_enable_tpm()
193 if (xam->cfg.tpm_base_addr) { in xen_arm_init()
207 uint64_t value = xam->cfg.tpm_base_addr; in xen_arm_get_tpm_base_addr()
223 xam->cfg.tpm_base_addr = value; in xen_arm_set_tpm_base_addr()
/qemu/hw/pci-host/
H A Dgpex-acpi.c132 PCIBus *bus = cfg->bus; in acpi_dsdt_add_gpex()
185 cfg->pio.base, 0, 0, 0); in acpi_dsdt_add_gpex()
208 acpi_dsdt_add_pci_route_table(dev, cfg->irq); in acpi_dsdt_add_gpex()
224 if (cfg->mmio32.size) { in acpi_dsdt_add_gpex()
227 cfg->mmio32.base + cfg->mmio32.size - 1); in acpi_dsdt_add_gpex()
237 if (cfg->pio.size) { in acpi_dsdt_add_gpex()
250 if (cfg->mmio64.size) { in acpi_dsdt_add_gpex()
253 cfg->mmio64.base + cfg->mmio64.size - 1); in acpi_dsdt_add_gpex()
274 cfg->ecam.base, in acpi_dsdt_add_gpex()
275 cfg->ecam.base + cfg->ecam.size - 1, in acpi_dsdt_add_gpex()
[all …]
/qemu/block/
H A Dqapi-sysemu.c423 ThrottleConfig cfg; in qmp_block_set_io_throttle() local
438 throttle_config_init(&cfg); in qmp_block_set_io_throttle()
439 cfg.buckets[THROTTLE_BPS_TOTAL].avg = arg->bps; in qmp_block_set_io_throttle()
440 cfg.buckets[THROTTLE_BPS_READ].avg = arg->bps_rd; in qmp_block_set_io_throttle()
441 cfg.buckets[THROTTLE_BPS_WRITE].avg = arg->bps_wr; in qmp_block_set_io_throttle()
443 cfg.buckets[THROTTLE_OPS_TOTAL].avg = arg->iops; in qmp_block_set_io_throttle()
444 cfg.buckets[THROTTLE_OPS_READ].avg = arg->iops_rd; in qmp_block_set_io_throttle()
486 cfg.op_size = arg->iops_size; in qmp_block_set_io_throttle()
489 if (!throttle_is_valid(&cfg, errp)) { in qmp_block_set_io_throttle()
493 if (throttle_enabled(&cfg)) { in qmp_block_set_io_throttle()
[all …]
H A Dthrottle-groups.c498 throttle_get_config(ts, cfg); in throttle_group_get_config()
774 ThrottleConfig cfg; in throttle_group_obj_complete() local
790 throttle_get_config(&tg->ts, &cfg); in throttle_group_obj_complete()
816 ThrottleConfig *cfg; in throttle_group_set() local
837 cfg = &tg->ts.cfg; in throttle_group_set()
854 cfg->op_size = value; in throttle_group_set()
863 ThrottleConfig cfg; in throttle_group_get() local
867 throttle_get_config(&tg->ts, &cfg); in throttle_group_get()
879 value = cfg.op_size; in throttle_group_get()
892 ThrottleConfig cfg; in throttle_group_set_limits() local
[all …]
H A Dqapi.c92 ThrottleConfig cfg; in bdrv_block_device_info() local
98 info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg; in bdrv_block_device_info()
102 info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg; in bdrv_block_device_info()
121 cfg.buckets[THROTTLE_BPS_TOTAL].burst_length; in bdrv_block_device_info()
124 cfg.buckets[THROTTLE_BPS_READ].burst_length; in bdrv_block_device_info()
127 cfg.buckets[THROTTLE_BPS_WRITE].burst_length; in bdrv_block_device_info()
131 cfg.buckets[THROTTLE_OPS_TOTAL].burst_length; in bdrv_block_device_info()
134 cfg.buckets[THROTTLE_OPS_READ].burst_length; in bdrv_block_device_info()
137 cfg.buckets[THROTTLE_OPS_WRITE].burst_length; in bdrv_block_device_info()
139 info->has_iops_size = cfg.op_size; in bdrv_block_device_info()
[all …]
/qemu/hw/misc/
H A Dxlnx-versal-cframe-reg.c99 if (faddr > s->cfg.blktype_num_frames[blktype]) { in cframe_incr_far()
164 if (s->cfg.cfu_fdro) { in cfrm_readout_frames()
223 s->cfg.blktype_num_frames[1]); in cfrm_last_frame_bot_post_read()
225 s->cfg.blktype_num_frames[0]); in cfrm_last_frame_bot_post_read()
229 s->cfg.blktype_num_frames[3]); in cfrm_last_frame_bot_post_read()
231 s->cfg.blktype_num_frames[2]); in cfrm_last_frame_bot_post_read()
256 s->cfg.blktype_num_frames[5]); in cfrm_last_frame_top_post_read()
258 s->cfg.blktype_num_frames[4]); in cfrm_last_frame_top_post_read()
262 s->cfg.blktype_num_frames[6]); in cfrm_last_frame_top_post_read()
591 if (s->cfg.cframe[i]) { in cframes_bcast_write()
[all …]
H A Dxlnx-versal-cfu.c190 for (int i = 0; i < ARRAY_SIZE(s->cfg.cframe); i++) { in cfu_transfer_cfi_packet()
191 if (s->cfg.cframe[i]) { in cfu_transfer_cfi_packet()
192 xlnx_cfi_transfer_packet(s->cfg.cframe[i], pkt); in cfu_transfer_cfi_packet()
196 assert(row_addr < ARRAY_SIZE(s->cfg.cframe)); in cfu_transfer_cfi_packet()
198 if (s->cfg.cframe[row_addr]) { in cfu_transfer_cfi_packet()
286 if (s->cfg.cfu) { in cfu_sfr_write()
287 cfu_transfer_cfi_packet(s->cfg.cfu, row_addr, &pkt); in cfu_sfr_write()
423 DEFINE_PROP_LINK("cframe0", XlnxVersalCFUAPB, cfg.cframe[0],
425 DEFINE_PROP_LINK("cframe1", XlnxVersalCFUAPB, cfg.cframe[1],
427 DEFINE_PROP_LINK("cframe2", XlnxVersalCFUAPB, cfg.cframe[2],
[all …]
H A Dxlnx-versal-crl.c102 for (i = 0; i < ARRAY_SIZE(s->cfg.adma); i++) { in crl_rst_adma_prew()
144 REGFIELD_RESET(dev, s, RST_USB0, RESET, val64, s->cfg.usb); in crl_rst_usb_prew()
347 for (i = 0; i < ARRAY_SIZE(s->cfg.cpu_r5); ++i) { in crl_init()
349 (Object **)&s->cfg.cpu_r5[i], in crl_init()
354 for (i = 0; i < ARRAY_SIZE(s->cfg.adma); ++i) { in crl_init()
356 (Object **)&s->cfg.adma[i], in crl_init()
361 for (i = 0; i < ARRAY_SIZE(s->cfg.uart); ++i) { in crl_init()
363 (Object **)&s->cfg.uart[i], in crl_init()
368 for (i = 0; i < ARRAY_SIZE(s->cfg.gem); ++i) { in crl_init()
370 (Object **)&s->cfg.gem[i], in crl_init()
[all …]
H A Dxlnx-versal-xramc.c137 ARRAY_FIELD_DP32(s->regs, XRAM_IMP, SIZE, s->cfg.encoded_size); in xram_ctrl_reset_enter()
162 switch (s->cfg.size) { in xram_ctrl_realize()
164 s->cfg.encoded_size = 0; in xram_ctrl_realize()
167 s->cfg.encoded_size = 1; in xram_ctrl_realize()
170 s->cfg.encoded_size = 2; in xram_ctrl_realize()
173 s->cfg.encoded_size = 3; in xram_ctrl_realize()
176 s->cfg.encoded_size = 4; in xram_ctrl_realize()
179 error_setg(errp, "Unsupported XRAM size %" PRId64, s->cfg.size); in xram_ctrl_realize()
185 s->cfg.size, &error_fatal); in xram_ctrl_realize()
222 DEFINE_PROP_UINT64("size", XlnxXramCtrl, cfg.size, 1 * MiB),
/qemu/include/qemu/
H A Dthrottle.h98 ThrottleConfig cfg; /* configuration */ member
142 bool throttle_enabled(ThrottleConfig *cfg);
144 bool throttle_is_valid(ThrottleConfig *cfg, Error **errp);
148 ThrottleConfig *cfg);
150 void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg);
152 void throttle_config_init(ThrottleConfig *cfg);
161 void throttle_limits_to_config(ThrottleLimits *arg, ThrottleConfig *cfg,
163 void throttle_config_to_limits(ThrottleConfig *cfg, ThrottleLimits *var);
/qemu/hw/input/
H A Dvirtio-input.c107 VirtIOInputConfig *cfg; in virtio_input_find_config() local
110 if (select == cfg->config.select && in virtio_input_find_config()
111 subsel == cfg->config.subsel) { in virtio_input_find_config()
112 return &cfg->config; in virtio_input_find_config()
121 VirtIOInputConfig *cfg; in virtio_input_add_config() local
130 cfg = g_new0(VirtIOInputConfig, 1); in virtio_input_add_config()
131 cfg->config = *config; in virtio_input_add_config()
238 VirtIOInputConfig *cfg; in virtio_input_device_realize() local
254 vinput->cfg_size = cfg->config.size; in virtio_input_device_realize()
268 VirtIOInputConfig *cfg, *next; in virtio_input_finalize() local
[all …]
/qemu/hw/display/
H A Dramfb.c36 struct RAMFBCfg cfg; member
87 width = be32_to_cpu(s->cfg.width); in ramfb_fw_cfg_write()
88 height = be32_to_cpu(s->cfg.height); in ramfb_fw_cfg_write()
89 stride = be32_to_cpu(s->cfg.stride); in ramfb_fw_cfg_write()
90 fourcc = be32_to_cpu(s->cfg.fourcc); in ramfb_fw_cfg_write()
91 addr = be64_to_cpu(s->cfg.addr); in ramfb_fw_cfg_write()
133 VMSTATE_BUFFER_UNSAFE(cfg, RAMFBState, 0, sizeof(RAMFBCfg)),
153 &s->cfg, sizeof(s->cfg), false); in ramfb_setup()
/qemu/hw/pci/
H A Dpcie_sriov.c32 uint8_t *cfg = dev->config + offset; in pcie_sriov_pf_init() local
42 pci_set_word(cfg + PCI_SRIOV_VF_OFFSET, vf_offset); in pcie_sriov_pf_init()
43 pci_set_word(cfg + PCI_SRIOV_VF_STRIDE, vf_stride); in pcie_sriov_pf_init()
50 pci_set_word(cfg + PCI_SRIOV_SUP_PGSIZE, SRIOV_SUP_PGSIZE_MINREQ); in pcie_sriov_pf_init()
56 pci_set_word(cfg + PCI_SRIOV_SYS_PGSIZE, 0x1); in pcie_sriov_pf_init()
59 pci_set_word(cfg + PCI_SRIOV_VF_DID, vf_dev_id); in pcie_sriov_pf_init()
60 pci_set_word(cfg + PCI_SRIOV_INITIAL_VF, init_vfs); in pcie_sriov_pf_init()
61 pci_set_word(cfg + PCI_SRIOV_TOTAL_VF, total_vfs); in pcie_sriov_pf_init()
62 pci_set_word(cfg + PCI_SRIOV_NUM_VF, 0); in pcie_sriov_pf_init()
279 uint8_t *cfg = dev->config + dev->exp.sriov_cap; in pcie_sriov_pf_add_sup_pgsize() local
[all …]

123456