# mach-virt - VirtIO guest (graphical console)
# =========================================================
#
# Usage:
#
#   $ qemu-system-aarch64 \
#     -nodefaults \
#     -readconfig mach-virt-graphical.cfg \
#     -cpu host
#
# You will probably need to tweak the lines marked as
# CHANGE ME before being able to use this configuration!
#
# The guest will have a selection of VirtIO devices
# tailored towards optimal performance with modern guests,
# and will be accessed through a graphical console.
#
# ---------------------------------------------------------
#
# Using -nodefaults is required to have full control over
# the virtual hardware: when it's specified, QEMU will
# populate the board with only the builtin peripherals,
# such as the PL011 UART, plus a PCI Express Root Bus; the
# user will then have to explicitly add further devices.
#
# The PCI Express Root Bus shows up in the guest as:
#
#   00:00.0 Host bridge
#
# This configuration file adds a number of other useful
# devices, more specifically:
#
#   00:01.0 Display controller
#   00:1c.* PCI bridge (PCI Express Root Ports)
#   01:00.0 SCSI storage controller
#   02:00.0 Ethernet controller
#   03:00.0 USB controller
#
# More information about these devices is available below.


# Machine options
# =========================================================
#
# We use the virt machine type and enable KVM acceleration
# for better performance.
#
# Using less than 1 GiB of memory is probably not going to
# yield good performance in the guest, and might even lead
# to obscure boot issues in some cases.
#
# Unfortunately, there is no way to configure the CPU model
# in this file, so it will have to be provided on the
# command line, but we can configure the guest to use the
# same GIC version as the host.

[machine]
  type = "virt"
  gic-version = "host"

[accel]
  accel = "kvm"

[memory]
  size = "1024"


# Firmware configuration
# =========================================================
#
# There are two parts to the firmware: a read-only image
# containing the executable code, which is shared between
# guests, and a read/write variable store that is owned
# by one specific guest, exclusively, and is used to
# record information such as the UEFI boot order.
#
# For any new guest, its permanent, private variable store
# should initially be copied from the template file
# provided along with the firmware binary.
#
# Depending on the OS distribution you're using on the
# host, the name of the package containing the firmware
# binary and variable store template, as well as the paths
# to the files themselves, will be different. For example:
#
# Fedora
#   edk2-aarch64                                      (pkg)
#   /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw       (bin)
#   /usr/share/edk2/aarch64/vars-template-pflash.raw  (var)
#
# RHEL
#   AAVMF                                             (pkg)
#   /usr/share/AAVMF/AAVMF_CODE.fd                    (bin)
#   /usr/share/AAVMF/AAVMF_VARS.fd                    (var)
#
# Debian/Ubuntu
#   qemu-efi                                          (pkg)
#   /usr/share/AAVMF/AAVMF_CODE.fd                    (bin)
#   /usr/share/AAVMF/AAVMF_VARS.fd                    (var)
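#
# For example, assuming the AAVMF paths listed above (adjust
# them to match your distribution), the private variable
# store for this guest could be created with:
#
#   $ cp /usr/share/AAVMF/AAVMF_VARS.fd guest_VARS.fd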

[drive "uefi-binary"]
  file = "/usr/share/AAVMF/AAVMF_CODE.fd"       # CHANGE ME
  format = "raw"
  if = "pflash"
  unit = "0"
  readonly = "on"

[drive "uefi-varstore"]
  file = "guest_VARS.fd"                        # CHANGE ME
  format = "raw"
  if = "pflash"
  unit = "1"


# PCI bridge (PCI Express Root Ports)
# =========================================================
#
# We create eight PCI Express Root Ports, and we plug them
# all into separate functions of the same slot. Some of
# them will be used by devices, the rest will remain
# available for hotplug.
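#
# As a rough illustration, a device could later be
# hot-plugged into one of the spare Root Ports from the
# QEMU monitor; e.g., using the otherwise unused "pcie.4"
# port defined below (the netdev name here is made up):
#
#   (qemu) netdev_add user,id=hotnet0
#   (qemu) device_add virtio-net-pci,netdev=hotnet0,bus=pcie.4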

[device "pcie.1"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.0"
  port = "1"
  chassis = "1"
  multifunction = "on"

[device "pcie.2"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.1"
  port = "2"
  chassis = "2"

[device "pcie.3"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.2"
  port = "3"
  chassis = "3"

[device "pcie.4"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.3"
  port = "4"
  chassis = "4"

[device "pcie.5"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.4"
  port = "5"
  chassis = "5"

[device "pcie.6"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.5"
  port = "6"
  chassis = "6"

[device "pcie.7"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.6"
  port = "7"
  chassis = "7"

[device "pcie.8"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.7"
  port = "8"
  chassis = "8"


# SCSI storage controller (and storage)
# =========================================================
#
# We use virtio-scsi here so that we can (hot)plug a large
# number of disks without running into issues; a SCSI disk,
# backed by a qcow2 disk image on the host's filesystem, is
# attached to it.
#
# We also create an optical disk, mostly for installation
# purposes: once the guest OS has been successfully
# installed, the guest will no longer boot from optical
# media. If you don't want, or no longer want, to have an
# optical disk in the guest you can safely comment out
# all relevant sections below.
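#
# The qcow2 image used as the disk backend doesn't need to
# exist yet; it can be created beforehand with qemu-img,
# for example (the 32G size is just an illustration):
#
#   $ qemu-img create -f qcow2 guest.qcow2 32G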

[device "scsi"]
  driver = "virtio-scsi-pci"
  bus = "pcie.1"
  addr = "00.0"

[device "scsi-disk"]
  driver = "scsi-hd"
  bus = "scsi.0"
  drive = "disk"
  bootindex = "1"

[drive "disk"]
  file = "guest.qcow2"                          # CHANGE ME
  format = "qcow2"
  if = "none"

[device "scsi-optical-disk"]
  driver = "scsi-cd"
  bus = "scsi.0"
  drive = "optical-disk"
  bootindex = "2"

[drive "optical-disk"]
  file = "install.iso"                          # CHANGE ME
  format = "raw"
  if = "none"


# Ethernet controller
# =========================================================
#
# We use virtio-net for improved performance over emulated
# hardware; on the host side, we take advantage of user
# networking so that the QEMU process doesn't require any
# additional privileges.
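#
# Note that with user networking the guest is not reachable
# from the host by default. If you need that, one option
# (not exercised by this file) is to add a hostfwd rule to
# the netdev below, for example to forward host port 2222
# to the guest's SSH port:
#
#   hostfwd = "tcp::2222-:22"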

[netdev "hostnet"]
  type = "user"

[device "net"]
  driver = "virtio-net-pci"
  netdev = "hostnet"
  bus = "pcie.2"
  addr = "00.0"


# USB controller (and input devices)
# =========================================================
#
# We add a virtualization-friendly USB 3.0 controller and
# a USB keyboard / USB tablet combo so that graphical
# guests can be controlled appropriately.

[device "usb"]
  driver = "nec-usb-xhci"
  bus = "pcie.3"
  addr = "00.0"

[device "keyboard"]
  driver = "usb-kbd"
  bus = "usb.0"

[device "tablet"]
  driver = "usb-tablet"
  bus = "usb.0"


# Display controller
# =========================================================
#
# We use virtio-gpu because the legacy VGA framebuffer is
# very troublesome on aarch64, and virtio-gpu is the only
# video device that doesn't implement it.
#
# If you're running the guest on a remote, potentially
# headless host, you will probably want to append something
# like
#
#   -display vnc=127.0.0.1:0
#
# to the command line in order to prevent QEMU from
# creating a graphical display window on the host and
# enable remote access instead.
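#
# You can then connect to the guest's display with the VNC
# client of your choice, for example:
#
#   $ vncviewer 127.0.0.1:0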

[device "video"]
  driver = "virtio-gpu"
  bus = "pcie.0"
  addr = "01.0"
