/*
 * iommufd container backend
 *
 * Copyright (C) 2023 Intel Corporation.
 * Copyright Red Hat, Inc. 2023
 *
 * Authors: Yi Liu <yi.l.liu@intel.com>
 *          Eric Auger <eric.auger@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
12
13#include "qemu/osdep.h"
14#include "sysemu/iommufd.h"
15#include "qapi/error.h"
16#include "qapi/qmp/qerror.h"
17#include "qemu/module.h"
18#include "qom/object_interfaces.h"
19#include "qemu/error-report.h"
20#include "monitor/monitor.h"
21#include "trace.h"
22#include <sys/ioctl.h>
23#include <linux/iommufd.h>
24
25static void iommufd_backend_init(Object *obj)
26{
27 IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
28
29 be->fd = -1;
30 be->users = 0;
31 be->owned = true;
32 qemu_mutex_init(&be->lock);
33}
34
35static void iommufd_backend_finalize(Object *obj)
36{
37 IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
38
39 if (be->owned) {
40 close(be->fd);
41 be->fd = -1;
42 }
43}
44
45static void iommufd_backend_set_fd(Object *obj, const char *str, Error **errp)
46{
47 IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
48 int fd = -1;
49
50 fd = monitor_fd_param(monitor_cur(), str, errp);
51 if (fd == -1) {
52 error_prepend(errp, "Could not parse remote object fd %s:", str);
53 return;
54 }
55 qemu_mutex_lock(&be->lock);
56 be->fd = fd;
57 be->owned = false;
58 qemu_mutex_unlock(&be->lock);
59 trace_iommu_backend_set_fd(be->fd);
60}
61
62static bool iommufd_backend_can_be_deleted(UserCreatable *uc)
63{
64 IOMMUFDBackend *be = IOMMUFD_BACKEND(uc);
65
66 return !be->users;
67}
68
69static void iommufd_backend_class_init(ObjectClass *oc, void *data)
70{
71 UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
72
73 ucc->can_be_deleted = iommufd_backend_can_be_deleted;
74
75 object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd);
76}
77
78int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp)
79{
80 int fd, ret = 0;
81
82 qemu_mutex_lock(&be->lock);
83 if (be->users == UINT32_MAX) {
84 error_setg(errp, "too many connections");
85 ret = -E2BIG;
86 goto out;
87 }
88 if (be->owned && !be->users) {
89 fd = qemu_open_old("/dev/iommu", O_RDWR);
90 if (fd < 0) {
91 error_setg_errno(errp, errno, "/dev/iommu opening failed");
92 ret = fd;
93 goto out;
94 }
95 be->fd = fd;
96 }
97 be->users++;
98out:
99 trace_iommufd_backend_connect(be->fd, be->owned,
100 be->users, ret);
101 qemu_mutex_unlock(&be->lock);
102 return ret;
103}
104
105void iommufd_backend_disconnect(IOMMUFDBackend *be)
106{
107 qemu_mutex_lock(&be->lock);
108 if (!be->users) {
109 goto out;
110 }
111 be->users--;
112 if (!be->users && be->owned) {
113 close(be->fd);
114 be->fd = -1;
115 }
116out:
117 trace_iommufd_backend_disconnect(be->fd, be->users);
118 qemu_mutex_unlock(&be->lock);
119}
120
121int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
122 Error **errp)
123{
124 int ret, fd = be->fd;
125 struct iommu_ioas_alloc alloc_data = {
126 .size = sizeof(alloc_data),
127 .flags = 0,
128 };
129
130 ret = ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data);
131 if (ret) {
132 error_setg_errno(errp, errno, "Failed to allocate ioas");
133 return ret;
134 }
135
136 *ioas_id = alloc_data.out_ioas_id;
137 trace_iommufd_backend_alloc_ioas(fd, *ioas_id, ret);
138
139 return ret;
140}
141
142void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id)
143{
144 int ret, fd = be->fd;
145 struct iommu_destroy des = {
146 .size = sizeof(des),
147 .id = id,
148 };
149
150 ret = ioctl(fd, IOMMU_DESTROY, &des);
151 trace_iommufd_backend_free_id(fd, id, ret);
152 if (ret) {
153 error_report("Failed to free id: %u %m", id);
154 }
155}
156
/*
 * Map host memory [vaddr, vaddr + size) into IOAS @ioas_id at a
 * fixed @iova.  @readonly omits write permission from the mapping.
 *
 * Returns 0 on success, -errno on failure.  EFAULT is only warned
 * about: mapping hardware PCI BAR regions is not supported yet and
 * is expected to fail this way (see TODO below).
 */
int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
                            ram_addr_t size, void *vaddr, bool readonly)
{
    int ret, fd = be->fd;
    struct iommu_ioas_map map = {
        .size = sizeof(map),
        .flags = IOMMU_IOAS_MAP_READABLE |
                 IOMMU_IOAS_MAP_FIXED_IOVA,  /* caller chooses the IOVA */
        .ioas_id = ioas_id,
        .__reserved = 0,
        .user_va = (uintptr_t)vaddr,
        .iova = iova,
        .length = size,
    };

    if (!readonly) {
        map.flags |= IOMMU_IOAS_MAP_WRITEABLE;
    }

    ret = ioctl(fd, IOMMU_IOAS_MAP, &map);
    /*
     * NOTE(review): errno is captured after the trace call below;
     * assumes tracing preserves errno — confirm for all trace backends.
     */
    trace_iommufd_backend_map_dma(fd, ioas_id, iova, size,
                                  vaddr, readonly, ret);
    if (ret) {
        ret = -errno;

        /* TODO: Not support mapping hardware PCI BAR region for now. */
        if (errno == EFAULT) {
            warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?");
        } else {
            error_report("IOMMU_IOAS_MAP failed: %m");
        }
    }
    return ret;
}
191
/*
 * Unmap [iova, iova + size) from IOAS @ioas_id.
 *
 * Returns 0 on success (including unmapping a nonexistent range,
 * see below), -errno on failure.
 */
int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
                              hwaddr iova, ram_addr_t size)
{
    int ret, fd = be->fd;
    struct iommu_ioas_unmap unmap = {
        .size = sizeof(unmap),
        .ioas_id = ioas_id,
        .iova = iova,
        .length = size,
    };

    ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap);
    /*
     * IOMMUFD takes mapping as some kind of object, unmapping
     * nonexistent mapping is treated as deleting a nonexistent
     * object and return ENOENT. This is different from legacy
     * backend which allows it. vIOMMU may trigger a lot of
     * redundant unmapping, to avoid flush the log, treat them
     * as success for IOMMUFD just like legacy backend.
     */
    if (ret && errno == ENOENT) {
        trace_iommufd_backend_unmap_dma_non_exist(fd, ioas_id, iova, size, ret);
        ret = 0;  /* swallow ENOENT: match legacy backend semantics */
    } else {
        trace_iommufd_backend_unmap_dma(fd, ioas_id, iova, size, ret);
    }

    /*
     * NOTE(review): errno is read after the trace call above; assumes
     * tracing preserves errno — confirm for all trace backends.
     */
    if (ret) {
        ret = -errno;
        error_report("IOMMU_IOAS_UNMAP failed: %m");
    }
    return ret;
}
225
/*
 * QOM type registration data: TYPE_IOMMUFD_BACKEND implements
 * TYPE_USER_CREATABLE so it can be instantiated with
 * "-object iommufd,id=..." or object-add.
 */
static const TypeInfo iommufd_backend_info = {
    .name = TYPE_IOMMUFD_BACKEND,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(IOMMUFDBackend),
    .instance_init = iommufd_backend_init,
    .instance_finalize = iommufd_backend_finalize,
    .class_size = sizeof(IOMMUFDBackendClass),
    .class_init = iommufd_backend_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};
239
/* Register the iommufd backend type with QOM at module-init time. */
static void register_types(void)
{
    type_register_static(&iommufd_backend_info);
}

type_init(register_types);