# Source: docs/mach-virt-graphical.cfg (QEMU source tree)
1 # mach-virt - VirtIO guest (graphical console)
2 # =========================================================
3 #
4 # Usage:
5 #
6 # $ qemu-system-aarch64 \
7 # -nodefaults \
8 # -readconfig mach-virt-graphical.cfg \
9 # -cpu host
10 #
11 # You will probably need to tweak the lines marked as
12 # CHANGE ME before being able to use this configuration!
13 #
14 # The guest will have a selection of VirtIO devices
15 # tailored towards optimal performance with modern guests,
16 # and will be accessed through a graphical console.
17 #
18 # ---------------------------------------------------------
19 #
20 # Using -nodefaults is required to have full control over
21 # the virtual hardware: when it's specified, QEMU will
22 # populate the board with only the builtin peripherals,
23 # such as the PL011 UART, plus a PCI Express Root Bus; the
24 # user will then have to explicitly add further devices.
25 #
26 # The PCI Express Root Bus shows up in the guest as:
27 #
28 # 00:00.0 Host bridge
29 #
30 # This configuration file adds a number of other useful
31 # devices, more specifically:
32 #
33 # 00:01.0 Display controller
34 #   00:1c.* PCI bridge (PCI Express Root Ports)
35 # 01:00.0 SCSI storage controller
36 # 02:00.0 Ethernet controller
37 # 03:00.0 USB controller
38 #
39 # More information about these devices is available below.
40
41
42 # Machine options
43 # =========================================================
44 #
45 # We use the virt machine type and enable KVM acceleration
46 # for better performance.
47 #
48 # Using less than 1 GiB of memory is probably not going to
49 # yield good performance in the guest, and might even lead
50 # to obscure boot issues in some cases.
51 #
52 # Unfortunately, there is no way to configure the CPU model
53 # in this file, so it will have to be provided on the
54 # command line, but we can configure the guest to use the
55 # same GIC version as the host.
56
57 [machine]
58 type = "virt" # the generic ARM virtual machine board
59 accel = "kvm" # hardware acceleration; requires an aarch64 KVM host
60 gic-version = "host" # use the host's GIC version (see note above)
61
62 [memory]
63 size = "1024" # MiB; at least 1 GiB is recommended (see note above)
64
65
66 # Firmware configuration
67 # =========================================================
68 #
69 # There are two parts to the firmware: a read-only image
70 # containing the executable code, which is shared between
71 # guests, and a read/write variable store that is owned
72 # by one specific guest, exclusively, and is used to
73 # record information such as the UEFI boot order.
74 #
75 # For any new guest, its permanent, private variable store
76 # should initially be copied from the template file
77 # provided along with the firmware binary.
78 #
79 # Depending on the OS distribution you're using on the
80 # host, the name of the package containing the firmware
81 # binary and variable store template, as well as the paths
82 # to the files themselves, will be different. For example:
83 #
84 # Fedora
85 # edk2-aarch64 (pkg)
86 # /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw (bin)
87 # /usr/share/edk2/aarch64/vars-template-pflash.raw (var)
88 #
89 # RHEL
90 # AAVMF (pkg)
91 # /usr/share/AAVMF/AAVMF_CODE.fd (bin)
92 # /usr/share/AAVMF/AAVMF_VARS.fd (var)
93 #
94 # Debian/Ubuntu
95 # qemu-efi (pkg)
96 # /usr/share/AAVMF/AAVMF_CODE.fd (bin)
97 # /usr/share/AAVMF/AAVMF_VARS.fd (var)
98
99 [drive "uefi-binary"]
100 file = "/usr/share/AAVMF/AAVMF_CODE.fd" # CHANGE ME (see distro paths above)
101 format = "raw"
102 if = "pflash" # exposed to the guest as a flash bank
103 unit = "0" # first pflash bank: firmware executable code
104 readonly = "on" # the code image is shared between guests, never written
105
106 [drive "uefi-varstore"]
107 file = "guest_VARS.fd" # CHANGE ME (per-guest copy of the vars template)
108 format = "raw"
109 if = "pflash"
110 unit = "1" # second pflash bank: writable variable store (UEFI boot order etc.)
111
112
113 # PCI bridge (PCI Express Root Ports)
114 # =========================================================
115 #
116 # We create eight PCI Express Root Ports, and we plug them
117 # all into separate functions of the same slot. Some of
118 # them will be used by devices, the rest will remain
119 # available for hotplug.
120
121 [device "pcie.1"]
122 driver = "pcie-root-port"
123 bus = "pcie.0" # all eight root ports plug into the PCI Express Root Bus
124 addr = "1c.0" # slot 1c, function 0
125 port = "1" # must be unique for each root port
126 chassis = "1" # must be unique for each root port
127 multifunction = "on" # function 0 advertises the other functions of slot 1c
128
129 [device "pcie.2"]
130 driver = "pcie-root-port"
131 bus = "pcie.0"
132 addr = "1c.1"
133 port = "2"
134 chassis = "2"
135
136 [device "pcie.3"]
137 driver = "pcie-root-port"
138 bus = "pcie.0"
139 addr = "1c.2"
140 port = "3"
141 chassis = "3"
142
143 [device "pcie.4"]
144 driver = "pcie-root-port"
145 bus = "pcie.0"
146 addr = "1c.3"
147 port = "4"
148 chassis = "4"
149
150 [device "pcie.5"]
151 driver = "pcie-root-port"
152 bus = "pcie.0"
153 addr = "1c.4"
154 port = "5"
155 chassis = "5"
156
157 [device "pcie.6"]
158 driver = "pcie-root-port"
159 bus = "pcie.0"
160 addr = "1c.5"
161 port = "6"
162 chassis = "6"
163
164 [device "pcie.7"]
165 driver = "pcie-root-port"
166 bus = "pcie.0"
167 addr = "1c.6"
168 port = "7"
169 chassis = "7"
170
171 [device "pcie.8"]
172 driver = "pcie-root-port"
173 bus = "pcie.0"
174 addr = "1c.7"
175 port = "8"
176 chassis = "8"
177
178
179 # SCSI storage controller (and storage)
180 # =========================================================
181 #
182 # We use virtio-scsi here so that we can (hot)plug a large
183 # number of disks without running into issues; a SCSI disk,
184 # backed by a qcow2 disk image on the host's filesystem, is
185 # attached to it.
186 #
187 # We also create an optical disk, mostly for installation
188 # purposes: once the guest OS has been successfully
189 # installed, the guest will no longer boot from optical
190 # media. If you don't want, or no longer want, to have an
191 # optical disk in the guest you can safely comment out
192 # all relevant sections below.
193
194 [device "scsi"]
195 driver = "virtio-scsi-pci"
196 bus = "pcie.1" # behind the first PCI Express Root Port (01:00.0)
197 addr = "00.0"
198
199 [device "scsi-disk"]
200 driver = "scsi-hd"
201 bus = "scsi.0" # SCSI bus exposed by the virtio-scsi controller above
202 drive = "disk" # backend defined below
203 bootindex = "1" # preferred boot device
204
205 [drive "disk"]
206 file = "guest.qcow2" # CHANGE ME
207 format = "qcow2"
208 if = "none" # attached explicitly via the "scsi-disk" device above
209
210 [device "scsi-optical-disk"]
211 driver = "scsi-cd"
212 bus = "scsi.0"
213 drive = "optical-disk"
214 bootindex = "2" # fallback boot device, used for installation
215
216 [drive "optical-disk"]
217 file = "install.iso" # CHANGE ME
218 format = "raw"
219 if = "none" # attached explicitly via the "scsi-optical-disk" device above
220
221
222 # Ethernet controller
223 # =========================================================
224 #
225 # We use virtio-net for improved performance over emulated
226 # hardware; on the host side, we take advantage of user
227 # networking so that the QEMU process doesn't require any
228 # additional privileges.
229
230 [netdev "hostnet"]
231 type = "user" # user (SLIRP) networking: no extra host privileges needed
232
233 [device "net"]
234 driver = "virtio-net-pci"
235 netdev = "hostnet" # host-side backend defined above
236 bus = "pcie.2" # behind the second PCI Express Root Port (02:00.0)
237 addr = "00.0"
238
239
240 # USB controller (and input devices)
241 # =========================================================
242 #
243 # We add a virtualization-friendly USB 3.0 controller and
244 # a USB keyboard / USB tablet combo so that graphical
245 # guests can be controlled appropriately.
246
247 [device "usb"]
248 driver = "nec-usb-xhci" # USB 3.0 (xHCI) controller
249 bus = "pcie.3" # behind the third PCI Express Root Port (03:00.0)
250 addr = "00.0"
251
252 [device "keyboard"]
253 driver = "usb-kbd"
254 bus = "usb.0" # USB bus exposed by the xHCI controller above
255
256 [device "tablet"]
257 driver = "usb-tablet" # absolute-coordinate pointing device
258 bus = "usb.0"
259
260
261 # Display controller
262 # =========================================================
263 #
264 # We use virtio-gpu because the legacy VGA framebuffer is
265 # very troublesome on aarch64, and virtio-gpu is the only
266 # video device that doesn't implement it.
267 #
268 # If you're running the guest on a remote, potentially
269 # headless host, you will probably want to append something
270 # like
271 #
272 # -display vnc=127.0.0.1:0
273 #
274 # to the command line in order to prevent QEMU from
275 # creating a graphical display window on the host and
276 # enable remote access instead.
277
278 [device "video"]
279 driver = "virtio-gpu" # no legacy VGA framebuffer (see note above)
280 bus = "pcie.0" # integrated on the root bus, shows up as 00:01.0
281 addr = "01.0"