]> git.proxmox.com Git - mirror_qemu.git/blame - backends/cryptodev-vhost.c
Merge tag 'for_upstream' of git://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging
[mirror_qemu.git] / backends / cryptodev-vhost.c
CommitLineData
042cea27
GA
1/*
2 * QEMU Cryptodev backend for QEMU cipher APIs
3 *
4 * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
5 *
6 * Authors:
7 * Gonglei <arei.gonglei@huawei.com>
8 * Jay Zhou <jianjay.zhou@huawei.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
0dda001b 13 * version 2.1 of the License, or (at your option) any later version.
042cea27
GA
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 *
23 */
24
25#include "qemu/osdep.h"
5da73dab 26#include "hw/virtio/virtio-bus.h"
042cea27
GA
27#include "sysemu/cryptodev-vhost.h"
28
29#ifdef CONFIG_VHOST_CRYPTO
5da73dab
GA
30#include "qapi/error.h"
31#include "qapi/qmp/qerror.h"
32#include "qemu/error-report.h"
33#include "hw/virtio/virtio-crypto.h"
34#include "sysemu/cryptodev-vhost-user.h"
35
042cea27
GA
/*
 * Return the maximum number of queue pairs supported by this
 * vhost crypto backend device (set up in cryptodev_vhost_init()).
 */
uint64_t
cryptodev_vhost_get_max_queues(
            CryptoDevBackendVhost *crypto)
{
    return crypto->dev.max_queues;
}
42
/*
 * Tear down the vhost device state and free the backend object.
 * The caller must not use @crypto after this returns.
 */
void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
    vhost_dev_cleanup(&crypto->dev);
    g_free(crypto);
}
48
/*
 * Allocate and initialize a vhost crypto backend device.
 *
 * Configures a single queue pair (nvqs = 1) backed by crypto->vqs and
 * derives dev.vq_index from the client's queue_index, which vhost-user
 * needs in order to address a specific queue pair.
 *
 * Returns the new backend on success; on failure the vhost_dev_init()
 * error is reported, the allocation is freed, and NULL is returned.
 */
struct CryptoDevBackendVhost *
cryptodev_vhost_init(
             CryptoDevBackendVhostOptions *options)
{
    int r;
    CryptoDevBackendVhost *crypto;
    Error *local_err = NULL;

    crypto = g_new(CryptoDevBackendVhost, 1);
    crypto->dev.max_queues = 1;
    crypto->dev.nvqs = 1;
    crypto->dev.vqs = crypto->vqs;

    crypto->cc = options->cc;

    crypto->dev.protocol_features = 0;
    /* No backend connection yet; -1 marks it as unset. */
    crypto->backend = -1;

    /* vhost-user needs vq_index to initiate a specific queue pair */
    crypto->dev.vq_index = crypto->cc->queue_index * crypto->dev.nvqs;

    r = vhost_dev_init(&crypto->dev, options->opaque, options->backend_type, 0,
                       &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail;
    }

    return crypto;
fail:
    g_free(crypto);
    return NULL;
}
82
5da73dab
GA
83static int
84cryptodev_vhost_start_one(CryptoDevBackendVhost *crypto,
85 VirtIODevice *dev)
86{
87 int r;
88
89 crypto->dev.nvqs = 1;
90 crypto->dev.vqs = crypto->vqs;
91
92 r = vhost_dev_enable_notifiers(&crypto->dev, dev);
93 if (r < 0) {
94 goto fail_notifiers;
95 }
96
97 r = vhost_dev_start(&crypto->dev, dev);
98 if (r < 0) {
99 goto fail_start;
100 }
101
102 return 0;
103
104fail_start:
105 vhost_dev_disable_notifiers(&crypto->dev, dev);
106fail_notifiers:
107 return r;
108}
109
/*
 * Stop vhost operation for a single queue pair and release its
 * host notifiers; the exact inverse of cryptodev_vhost_start_one().
 */
static void
cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto,
                                 VirtIODevice *dev)
{
    vhost_dev_stop(&crypto->dev, dev);
    vhost_dev_disable_notifiers(&crypto->dev, dev);
}
117
118CryptoDevBackendVhost *
119cryptodev_get_vhost(CryptoDevBackendClient *cc,
120 CryptoDevBackend *b,
121 uint16_t queue)
122{
123 CryptoDevBackendVhost *vhost_crypto = NULL;
124
125 if (!cc) {
126 return NULL;
127 }
128
129 switch (cc->type) {
130#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
131 case CRYPTODEV_BACKEND_TYPE_VHOST_USER:
132 vhost_crypto = cryptodev_vhost_user_get_vhost(cc, b, queue);
133 break;
134#endif
135 default:
136 break;
137 }
138
139 return vhost_crypto;
140}
141
/*
 * Point the vhost device at the virtqueue index of the queue pair
 * it should drive.
 */
static void
cryptodev_vhost_set_vq_index(CryptoDevBackendVhost *crypto,
                                     int vq_index)
{
    crypto->dev.vq_index = vq_index;
}
148
149static int
150vhost_set_vring_enable(CryptoDevBackendClient *cc,
151 CryptoDevBackend *b,
152 uint16_t queue, int enable)
153{
154 CryptoDevBackendVhost *crypto =
155 cryptodev_get_vhost(cc, b, queue);
156 const VhostOps *vhost_ops;
157
158 cc->vring_enable = enable;
159
160 if (!crypto) {
161 return 0;
162 }
163
164 vhost_ops = crypto->dev.vhost_ops;
165 if (vhost_ops->vhost_set_vring_enable) {
166 return vhost_ops->vhost_set_vring_enable(&crypto->dev, enable);
167 }
168
169 return 0;
170}
171
/*
 * Start vhost operation for all @total_queues queue pairs of the
 * virtio-crypto device @dev.
 *
 * For every queue: bind the peer's vhost backend to its vq_index,
 * then (after the guest notifiers are set up in one go) start the
 * vhost device and restore any previously recorded vring-enable
 * state.  On failure, every queue started so far is stopped and the
 * guest notifiers are unbound before the negative error is returned.
 */
int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int r, e;
    int i;
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_set_vq_index(vhost_crypto, i);

        /* Suppress the masking guest notifiers on vhost user
         * because vhost user doesn't interrupt masking/unmasking
         * properly.
         */
        if (cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
     }

    r = k->set_guest_notifiers(qbus->parent, total_queues, true);
    if (r < 0) {
        error_report("error binding guest notifier: %d", -r);
        goto err;
    }

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        r = cryptodev_vhost_start_one(vhost_crypto, dev);

        if (r < 0) {
            goto err_start;
        }

        if (cc->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(cc, b, i, cc->vring_enable);

            if (r < 0) {
                goto err_start;
            }
        }
    }

    return 0;

err_start:
    /* Roll back only the queues that were successfully started. */
    while (--i >= 0) {
        cc = b->conf.peers.ccs[i];
        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_stop_one(vhost_crypto, dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_queues, false);
    if (e < 0) {
        error_report("vhost guest notifier cleanup failed: %d", e);
    }
err:
    return r;
}
245
246void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
247{
248 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
249 VirtioBusState *vbus = VIRTIO_BUS(qbus);
250 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
251 VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
252 CryptoDevBackend *b = vcrypto->cryptodev;
253 CryptoDevBackendVhost *vhost_crypto;
254 CryptoDevBackendClient *cc;
255 size_t i;
256 int r;
257
258 for (i = 0; i < total_queues; i++) {
259 cc = b->conf.peers.ccs[i];
260
261 vhost_crypto = cryptodev_get_vhost(cc, b, i);
262 cryptodev_vhost_stop_one(vhost_crypto, dev);
263 }
264
265 r = k->set_guest_notifiers(qbus->parent, total_queues, false);
266 if (r < 0) {
267 error_report("vhost guest notifier cleanup failed: %d", r);
268 }
269 assert(r >= 0);
270}
271
/*
 * Mask or unmask the vhost interrupt notifier for virtqueue @idx of
 * queue pair @queue; dispatches to the queue's vhost backend.
 * @queue must be below MAX_CRYPTO_QUEUE_NUM.
 */
void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
                                           int queue,
                                           int idx, bool mask)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    assert(queue < MAX_CRYPTO_QUEUE_NUM);

    cc = b->conf.peers.ccs[queue];
    vhost_crypto = cryptodev_get_vhost(cc, b, queue);

    vhost_virtqueue_mask(&vhost_crypto->dev, dev, idx, mask);
}
288
/*
 * Return whether virtqueue @idx of queue pair @queue has a pending
 * vhost notification; dispatches to the queue's vhost backend.
 * @queue must be below MAX_CRYPTO_QUEUE_NUM.
 */
bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
                                              int queue, int idx)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    assert(queue < MAX_CRYPTO_QUEUE_NUM);

    cc = b->conf.peers.ccs[queue];
    vhost_crypto = cryptodev_get_vhost(cc, b, queue);

    return vhost_virtqueue_pending(&vhost_crypto->dev, idx);
}
304
042cea27
GA
305#else
/* Stub (CONFIG_VHOST_CRYPTO disabled): no queues available. */
uint64_t
cryptodev_vhost_get_max_queues(CryptoDevBackendVhost *crypto)
{
    return 0;
}
311
/* Stub (CONFIG_VHOST_CRYPTO disabled): nothing to clean up. */
void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
}
315
/* Stub (CONFIG_VHOST_CRYPTO disabled): vhost backend cannot be created. */
struct CryptoDevBackendVhost *
cryptodev_vhost_init(CryptoDevBackendVhostOptions *options)
{
    return NULL;
}
5da73dab
GA
321
/* Stub (CONFIG_VHOST_CRYPTO disabled): no vhost backend exists. */
CryptoDevBackendVhost *
cryptodev_get_vhost(CryptoDevBackendClient *cc,
                            CryptoDevBackend *b,
                            uint16_t queue)
{
    return NULL;
}
329
/* Stub (CONFIG_VHOST_CRYPTO disabled): starting always fails. */
int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
    return -1;
}
334
/* Stub (CONFIG_VHOST_CRYPTO disabled): nothing to stop. */
void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
{
}
338
/* Stub (CONFIG_VHOST_CRYPTO disabled): masking is a no-op. */
void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
                                           int queue,
                                           int idx, bool mask)
{
}
344
/* Stub (CONFIG_VHOST_CRYPTO disabled): never any pending notification. */
bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
                                              int queue, int idx)
{
    return false;
}
042cea27 350#endif