]>
Commit | Line | Data |
---|---|---|
6e6d8ac6 EA |
1 | /* |
2 | * iommufd container backend | |
3 | * | |
4 | * Copyright (C) 2023 Intel Corporation. | |
5 | * Copyright Red Hat, Inc. 2023 | |
6 | * | |
7 | * Authors: Yi Liu <yi.l.liu@intel.com> | |
8 | * Eric Auger <eric.auger@redhat.com> | |
9 | * | |
10 | * SPDX-License-Identifier: GPL-2.0-or-later | |
11 | */ | |
12 | ||
#include "qemu/osdep.h"
#include <string.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>
#include "sysemu/iommufd.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qom/object_interfaces.h"
#include "monitor/monitor.h"
#include "trace.h"
24 | ||
25 | static void iommufd_backend_init(Object *obj) | |
26 | { | |
27 | IOMMUFDBackend *be = IOMMUFD_BACKEND(obj); | |
28 | ||
29 | be->fd = -1; | |
30 | be->users = 0; | |
31 | be->owned = true; | |
6e6d8ac6 EA |
32 | } |
33 | ||
34 | static void iommufd_backend_finalize(Object *obj) | |
35 | { | |
36 | IOMMUFDBackend *be = IOMMUFD_BACKEND(obj); | |
37 | ||
38 | if (be->owned) { | |
39 | close(be->fd); | |
40 | be->fd = -1; | |
41 | } | |
42 | } | |
43 | ||
44 | static void iommufd_backend_set_fd(Object *obj, const char *str, Error **errp) | |
45 | { | |
46 | IOMMUFDBackend *be = IOMMUFD_BACKEND(obj); | |
47 | int fd = -1; | |
48 | ||
49 | fd = monitor_fd_param(monitor_cur(), str, errp); | |
50 | if (fd == -1) { | |
51 | error_prepend(errp, "Could not parse remote object fd %s:", str); | |
52 | return; | |
53 | } | |
6e6d8ac6 EA |
54 | be->fd = fd; |
55 | be->owned = false; | |
6e6d8ac6 EA |
56 | trace_iommu_backend_set_fd(be->fd); |
57 | } | |
58 | ||
59 | static bool iommufd_backend_can_be_deleted(UserCreatable *uc) | |
60 | { | |
61 | IOMMUFDBackend *be = IOMMUFD_BACKEND(uc); | |
62 | ||
63 | return !be->users; | |
64 | } | |
65 | ||
66 | static void iommufd_backend_class_init(ObjectClass *oc, void *data) | |
67 | { | |
68 | UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); | |
69 | ||
70 | ucc->can_be_deleted = iommufd_backend_can_be_deleted; | |
71 | ||
72 | object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd); | |
73 | } | |
74 | ||
75 | int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp) | |
76 | { | |
77 | int fd, ret = 0; | |
78 | ||
6e6d8ac6 EA |
79 | if (be->owned && !be->users) { |
80 | fd = qemu_open_old("/dev/iommu", O_RDWR); | |
81 | if (fd < 0) { | |
82 | error_setg_errno(errp, errno, "/dev/iommu opening failed"); | |
83 | ret = fd; | |
84 | goto out; | |
85 | } | |
86 | be->fd = fd; | |
87 | } | |
88 | be->users++; | |
89 | out: | |
90 | trace_iommufd_backend_connect(be->fd, be->owned, | |
91 | be->users, ret); | |
6e6d8ac6 EA |
92 | return ret; |
93 | } | |
94 | ||
95 | void iommufd_backend_disconnect(IOMMUFDBackend *be) | |
96 | { | |
6e6d8ac6 EA |
97 | if (!be->users) { |
98 | goto out; | |
99 | } | |
100 | be->users--; | |
101 | if (!be->users && be->owned) { | |
102 | close(be->fd); | |
103 | be->fd = -1; | |
104 | } | |
105 | out: | |
106 | trace_iommufd_backend_disconnect(be->fd, be->users); | |
6e6d8ac6 EA |
107 | } |
108 | ||
109 | int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id, | |
110 | Error **errp) | |
111 | { | |
112 | int ret, fd = be->fd; | |
113 | struct iommu_ioas_alloc alloc_data = { | |
114 | .size = sizeof(alloc_data), | |
115 | .flags = 0, | |
116 | }; | |
117 | ||
118 | ret = ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data); | |
119 | if (ret) { | |
120 | error_setg_errno(errp, errno, "Failed to allocate ioas"); | |
121 | return ret; | |
122 | } | |
123 | ||
124 | *ioas_id = alloc_data.out_ioas_id; | |
125 | trace_iommufd_backend_alloc_ioas(fd, *ioas_id, ret); | |
126 | ||
127 | return ret; | |
128 | } | |
129 | ||
130 | void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id) | |
131 | { | |
132 | int ret, fd = be->fd; | |
133 | struct iommu_destroy des = { | |
134 | .size = sizeof(des), | |
135 | .id = id, | |
136 | }; | |
137 | ||
138 | ret = ioctl(fd, IOMMU_DESTROY, &des); | |
139 | trace_iommufd_backend_free_id(fd, id, ret); | |
140 | if (ret) { | |
141 | error_report("Failed to free id: %u %m", id); | |
142 | } | |
143 | } | |
144 | ||
145 | int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, | |
146 | ram_addr_t size, void *vaddr, bool readonly) | |
147 | { | |
148 | int ret, fd = be->fd; | |
149 | struct iommu_ioas_map map = { | |
150 | .size = sizeof(map), | |
151 | .flags = IOMMU_IOAS_MAP_READABLE | | |
152 | IOMMU_IOAS_MAP_FIXED_IOVA, | |
153 | .ioas_id = ioas_id, | |
154 | .__reserved = 0, | |
155 | .user_va = (uintptr_t)vaddr, | |
156 | .iova = iova, | |
157 | .length = size, | |
158 | }; | |
159 | ||
160 | if (!readonly) { | |
161 | map.flags |= IOMMU_IOAS_MAP_WRITEABLE; | |
162 | } | |
163 | ||
164 | ret = ioctl(fd, IOMMU_IOAS_MAP, &map); | |
165 | trace_iommufd_backend_map_dma(fd, ioas_id, iova, size, | |
166 | vaddr, readonly, ret); | |
167 | if (ret) { | |
168 | ret = -errno; | |
169 | ||
170 | /* TODO: Not support mapping hardware PCI BAR region for now. */ | |
171 | if (errno == EFAULT) { | |
172 | warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?"); | |
173 | } else { | |
174 | error_report("IOMMU_IOAS_MAP failed: %m"); | |
175 | } | |
176 | } | |
177 | return ret; | |
178 | } | |
179 | ||
180 | int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, | |
181 | hwaddr iova, ram_addr_t size) | |
182 | { | |
183 | int ret, fd = be->fd; | |
184 | struct iommu_ioas_unmap unmap = { | |
185 | .size = sizeof(unmap), | |
186 | .ioas_id = ioas_id, | |
187 | .iova = iova, | |
188 | .length = size, | |
189 | }; | |
190 | ||
191 | ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap); | |
192 | /* | |
193 | * IOMMUFD takes mapping as some kind of object, unmapping | |
194 | * nonexistent mapping is treated as deleting a nonexistent | |
195 | * object and return ENOENT. This is different from legacy | |
196 | * backend which allows it. vIOMMU may trigger a lot of | |
197 | * redundant unmapping, to avoid flush the log, treat them | |
198 | * as succeess for IOMMUFD just like legacy backend. | |
199 | */ | |
200 | if (ret && errno == ENOENT) { | |
201 | trace_iommufd_backend_unmap_dma_non_exist(fd, ioas_id, iova, size, ret); | |
202 | ret = 0; | |
203 | } else { | |
204 | trace_iommufd_backend_unmap_dma(fd, ioas_id, iova, size, ret); | |
205 | } | |
206 | ||
207 | if (ret) { | |
208 | ret = -errno; | |
209 | error_report("IOMMU_IOAS_UNMAP failed: %m"); | |
210 | } | |
211 | return ret; | |
212 | } | |
213 | ||
/*
 * QOM type description for the iommufd backend: a user-creatable object
 * (-object iommufd,...) holding a shared /dev/iommu file descriptor.
 */
static const TypeInfo iommufd_backend_info = {
    .name = TYPE_IOMMUFD_BACKEND,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(IOMMUFDBackend),
    .instance_init = iommufd_backend_init,
    .instance_finalize = iommufd_backend_finalize,
    .class_size = sizeof(IOMMUFDBackendClass),
    .class_init = iommufd_backend_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};
227 | ||
/* Register the iommufd backend type with QOM at module-init time. */
static void register_types(void)
{
    type_register_static(&iommufd_backend_info);
}

type_init(register_types);