# QEMU readconfig sample: docs/config/mach-virt-serial.cfg
# mach-virt - VirtIO guest (serial console)
# =========================================================
#
# Usage:
#
#   $ qemu-system-aarch64 \
#     -nodefaults \
#     -readconfig mach-virt-serial.cfg \
#     -display none -serial mon:stdio \
#     -cpu host
#
# You will probably need to tweak the lines marked as
# CHANGE ME before being able to use this configuration!
#
# The guest will have a selection of VirtIO devices
# tailored towards optimal performance with modern guests,
# and will be accessed through the serial console.
#
# ---------------------------------------------------------
#
# Using -nodefaults is required to have full control over
# the virtual hardware: when it's specified, QEMU will
# populate the board with only the builtin peripherals,
# such as the PL011 UART, plus a PCI Express Root Bus; the
# user will then have to explicitly add further devices.
#
# The PCI Express Root Bus shows up in the guest as:
#
#   00:00.0 Host bridge
#
# This configuration file adds a number of other useful
# devices, more specifically:
#
#   00:1c.* PCI bridge (PCI Express Root Ports)
#   01:00.0 SCSI storage controller
#   02:00.0 Ethernet controller
#
# More information about these devices is available below.
#
# We use '-display none' to prevent QEMU from creating a
# graphical display window, which would serve no use in
# this specific configuration, and '-serial mon:stdio' to
# multiplex the guest's serial console and the QEMU monitor
# to the host's stdio; use 'Ctrl+A h' to learn how to
# switch between the two and more.


# Machine options
# =========================================================
#
# We use the virt machine type and enable KVM acceleration
# for better performance.
#
# Using less than 1 GiB of memory is probably not going to
# yield good performance in the guest, and might even lead
# to obscure boot issues in some cases.
#
# Unfortunately, there is no way to configure the CPU model
# in this file, so it will have to be provided on the
# command line, but we can configure the guest to use the
# same GIC version as the host.

[machine]
  type = "virt"
  gic-version = "host"    # mirror the host GIC; only valid together with KVM

[accel]
  accel = "kvm"

[memory]
  size = "1024"    # MiB (= 1 GiB); see the note above about smaller sizes


# Firmware configuration
# =========================================================
#
# There are two parts to the firmware: a read-only image
# containing the executable code, which is shared between
# guests, and a read/write variable store that is owned
# by one specific guest, exclusively, and is used to
# record information such as the UEFI boot order.
#
# For any new guest, its permanent, private variable store
# should initially be copied from the template file
# provided along with the firmware binary.
#
# Depending on the OS distribution you're using on the
# host, the name of the package containing the firmware
# binary and variable store template, as well as the paths
# to the files themselves, will be different. For example:
#
# Fedora
#   edk2-aarch64 (pkg)
#   /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw (bin)
#   /usr/share/edk2/aarch64/vars-template-pflash.raw (var)
#
# RHEL
#   AAVMF (pkg)
#   /usr/share/AAVMF/AAVMF_CODE.fd (bin)
#   /usr/share/AAVMF/AAVMF_VARS.fd (var)
#
# Debian/Ubuntu
#   qemu-efi (pkg)
#   /usr/share/AAVMF/AAVMF_CODE.fd (bin)
#   /usr/share/AAVMF/AAVMF_VARS.fd (var)

# Read-only firmware code, shared between guests (pflash unit 0).
[drive "uefi-binary"]
  file = "/usr/share/AAVMF/AAVMF_CODE.fd"       # CHANGE ME
  format = "raw"
  if = "pflash"
  unit = "0"
  readonly = "on"

# Per-guest writable variable store (pflash unit 1); copy it
# from the distribution's VARS template before first boot.
[drive "uefi-varstore"]
  file = "guest_VARS.fd"                        # CHANGE ME
  format = "raw"
  if = "pflash"
  unit = "1"


# PCI bridge (PCI Express Root Ports)
# =========================================================
#
# We create eight PCI Express Root Ports, and we plug them
# all into separate functions of the same slot. Some of
# them will be used by devices, the rest will remain
# available for hotplug.

# First function of slot 1c carries multifunction = "on" so the
# guest scans the remaining functions of the same slot.
[device "pcie.1"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.0"
  port = "1"
  chassis = "1"
  multifunction = "on"

[device "pcie.2"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.1"
  port = "2"
  chassis = "2"

[device "pcie.3"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.2"
  port = "3"
  chassis = "3"

[device "pcie.4"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.3"
  port = "4"
  chassis = "4"

[device "pcie.5"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.4"
  port = "5"
  chassis = "5"

[device "pcie.6"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.5"
  port = "6"
  chassis = "6"

[device "pcie.7"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.6"
  port = "7"
  chassis = "7"

[device "pcie.8"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.7"
  port = "8"
  chassis = "8"


# SCSI storage controller (and storage)
# =========================================================
#
# We use virtio-scsi here so that we can (hot)plug a large
# number of disks without running into issues; a SCSI disk,
# backed by a qcow2 disk image on the host's filesystem, is
# attached to it.
#
# We also create an optical disk, mostly for installation
# purposes: once the guest OS has been successfully
# installed, the guest will no longer boot from optical
# media. If you don't want, or no longer want, to have an
# optical disk in the guest you can safely comment out
# all relevant sections below.

# virtio-scsi HBA, plugged into the first root port (guest 01:00.0).
[device "scsi"]
  driver = "virtio-scsi-pci"
  bus = "pcie.1"
  addr = "00.0"

[device "scsi-disk"]
  driver = "scsi-hd"
  bus = "scsi.0"
  drive = "disk"
  bootindex = "1"    # preferred boot device

[drive "disk"]
  file = "guest.qcow2"                          # CHANGE ME
  format = "qcow2"
  if = "none"

[device "scsi-optical-disk"]
  driver = "scsi-cd"
  bus = "scsi.0"
  drive = "optical-disk"
  bootindex = "2"    # fallback after the hard disk

[drive "optical-disk"]
  file = "install.iso"                          # CHANGE ME
  format = "raw"
  if = "none"


# Ethernet controller
# =========================================================
#
# We use virtio-net for improved performance over emulated
# hardware; on the host side, we take advantage of user
# networking so that the QEMU process doesn't require any
# additional privileges.

# Unprivileged user-mode (SLIRP) backend.
[netdev "hostnet"]
  type = "user"

# virtio-net NIC, plugged into the second root port (guest 02:00.0).
[device "net"]
  driver = "virtio-net-pci"
  netdev = "hostnet"
  bus = "pcie.2"
  addr = "00.0"