/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


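/*
 * Kick event handler, registered on the vhost kickfd: drain the eventfd
 * counter and relay the guest kick to the device by writing the virtq
 * index to the doorbell page.
 */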
static void
mlx5_vdpa_virtq_handler(void *cb_arg)
{
	struct mlx5_vdpa_virtq *virtq = cb_arg;
	struct mlx5_vdpa_priv *priv = virtq->priv;
	uint64_t buf;
	int nbytes;

	do {
		nbytes = read(virtq->intr_handle.fd, &buf, 8);
		if (nbytes < 0) {
			if (errno == EINTR ||
			    errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			DRV_LOG(ERR, "Failed to read kickfd of virtq %d: %s",
				virtq->index, strerror(errno));
		}
		break;
	} while (1);
	rte_write32(virtq->index, priv->virtq_db_addr);
	DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}

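/*
 * Release all resources held by a virtq: unregister the kickfd interrupt
 * callback (retrying while the unregister is busy), destroy the virtq
 * object, deregister and free the UMEMs, and destroy the event QP if one
 * was created.
 */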
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
	unsigned int i;
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (virtq->intr_handle.fd != -1) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(&virtq->intr_handle,
							mlx5_vdpa_virtq_handler,
							virtq);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of virtq %d interrupt, retries = %d.",
					virtq->intr_handle.fd,
					(int)virtq->index, retries);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		virtq->intr_handle.fd = -1;
	}
	if (virtq->virtq)
		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
	virtq->virtq = NULL;
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		if (virtq->umems[i].obj)
			claim_zero(mlx5_glue->devx_umem_dereg
						(virtq->umems[i].obj));
		if (virtq->umems[i].buf)
			rte_free(virtq->umems[i].buf);
	}
	memset(&virtq->umems, 0, sizeof(virtq->umems));
	if (virtq->eqp.fw_qp)
		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
	return 0;
}

void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
	int i;

	for (i = 0; i < priv->nr_virtqs; i++) {
		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
		priv->virtqs[i].enable = 0;
	}
	if (priv->tis) {
		claim_zero(mlx5_devx_cmd_destroy(priv->tis));
		priv->tis = NULL;
	}
	if (priv->td) {
		claim_zero(mlx5_devx_cmd_destroy(priv->td));
		priv->td = NULL;
	}
	if (priv->virtq_db_addr) {
		claim_zero(munmap(priv->virtq_db_addr, priv->var->length));
		priv->virtq_db_addr = NULL;
	}
	priv->features = 0;
	priv->nr_virtqs = 0;
}

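/* Move a virtq to the RDY (state != 0) or SUSPEND (state == 0) HW state. */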
int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
	struct mlx5_devx_virtq_attr attr = {
			.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
			.state = state ? MLX5_VIRTQ_STATE_RDY :
					 MLX5_VIRTQ_STATE_SUSPEND,
			.queue_index = virtq->index,
	};

	return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}

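/*
 * Suspend a virtq and propagate its HW available/used indexes back to the
 * vhost library, keeping the ring state consistent for the guest.
 */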
int
mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_devx_virtq_attr attr = {0};
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret = mlx5_vdpa_virtq_modify(virtq, 0);

	if (ret)
		return -1;
	if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
		DRV_LOG(ERR, "Failed to query virtq %d.", index);
		return -1;
	}
	DRV_LOG(INFO, "Query vid %d vring %d: hw_available_index=%d, "
		"hw_used_index=%d", priv->vid, index,
		attr.hw_available_index, attr.hw_used_index);
	ret = rte_vhost_set_vring_base(priv->vid, index,
				       attr.hw_available_index,
				       attr.hw_used_index);
	if (ret) {
		DRV_LOG(ERR, "Failed to set virtq %d base.", index);
		return -1;
	}
	return 0;
}

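/*
 * Translate a host virtual address to a guest physical address using the
 * vhost memory regions. Return 0 when the address is not covered by any
 * region; callers treat 0 as a translation failure.
 */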
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (hva >= reg->host_user_addr &&
		    hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}
	return gpa;
}

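/*
 * Create and start a single virtq: query the vhost vring, create an event
 * QP when completions are needed, allocate and register the UMEMs,
 * translate the ring addresses to GPAs, create the virtq object, move it
 * to the RDY state and hook the kickfd interrupt handler.
 */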
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	struct rte_vhost_vring vq;
	struct mlx5_devx_virtq_attr attr = {0};
	uint64_t gpa;
	int ret;
	unsigned int i;
	uint16_t last_avail_idx;
	uint16_t last_used_idx;

	ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
	if (ret)
		return -1;
	virtq->index = index;
	virtq->vq_size = vq.size;
	attr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
	attr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
	attr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
	attr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
	attr.virtio_version_1_0 = !!(priv->features & (1ULL <<
							VIRTIO_F_VERSION_1));
	attr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
			MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
	/*
	 * Event QPs are needed only when the guest asks for call
	 * notifications (callfd != -1) or when the device cannot work
	 * without MSI-X events; otherwise the virtq works in poll mode.
	 */
	attr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<
				MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
				MLX5_VIRTQ_EVENT_MODE_QP :
				MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
	if (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
		ret = mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,
						&virtq->eqp);
		if (ret) {
			DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
				index);
			return -1;
		}
		attr.qp_id = virtq->eqp.fw_qp->id;
	} else {
		DRV_LOG(INFO, "Virtq %d works in poll mode; no need for "
			"event QPs or an event mechanism.", index);
	}
	/* Setup 3 UMEMs for each virtq. */
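	/*
	 * The required size of each UMEM is reported by the HW capabilities
	 * as a linear function of the queue size: a * q_size + b.
	 */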
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
							priv->caps.umems[i].b;
		virtq->umems[i].buf = rte_zmalloc(__func__,
						  virtq->umems[i].size, 4096);
		if (!virtq->umems[i].buf) {
			DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
				" %u.", i, index);
			goto error;
		}
		virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->ctx,
						virtq->umems[i].buf,
						virtq->umems[i].size,
						IBV_ACCESS_LOCAL_WRITE);
		if (!virtq->umems[i].obj) {
			DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
				i, index);
			goto error;
		}
		attr.umems[i].id = virtq->umems[i].obj->umem_id;
		attr.umems[i].offset = 0;
		attr.umems[i].size = virtq->umems[i].size;
	}
	if (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.desc);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
			goto error;
		}
		attr.desc_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.used);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			goto error;
		}
		attr.used_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.avail);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			goto error;
		}
		attr.available_addr = gpa;
	}
	ret = rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,
				       &last_used_idx);
	if (ret) {
		last_avail_idx = 0;
		last_used_idx = 0;
		DRV_LOG(WARNING, "Couldn't get vring base, indexes are set to 0.");
	} else {
		DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d "
			"for virtq %d.", priv->vid, last_avail_idx,
			last_used_idx, index);
	}
	attr.hw_available_index = last_avail_idx;
	attr.hw_used_index = last_used_idx;
	attr.q_size = vq.size;
	attr.mkey = priv->gpa_mkey_index;
	attr.tis_id = priv->tis->id;
	attr.queue_index = index;
	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
	virtq->priv = priv;
	if (!virtq->virtq)
		goto error;
	if (mlx5_vdpa_virtq_modify(virtq, 1))
		goto error;
	rte_write32(virtq->index, priv->virtq_db_addr);
	/* Relay guest kicks to the HW doorbell through the kickfd handler. */
	virtq->intr_handle.fd = vq.kickfd;
	if (virtq->intr_handle.fd == -1) {
		DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
		if (!priv->direct_notifier) {
			DRV_LOG(ERR, "Virtq %d cannot be notified.", index);
			goto error;
		}
	} else {
		virtq->intr_handle.type = RTE_INTR_HANDLE_EXT;
		if (rte_intr_callback_register(&virtq->intr_handle,
					       mlx5_vdpa_virtq_handler,
					       virtq)) {
			virtq->intr_handle.fd = -1;
			DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
				index);
			goto error;
		} else {
			DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
				virtq->intr_handle.fd, index);
		}
	}
	return 0;
error:
	mlx5_vdpa_virtq_unset(virtq);
	return -1;
}

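/*
 * Verify that every negotiated virtio feature is backed by a HW/driver
 * capability; reject the configuration otherwise.
 */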
static int
mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
{
	if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		if (!(priv->caps.virtio_queue_type & (1 <<
						     MLX5_VIRTQ_TYPE_PACKED))) {
			DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
				"%d - it was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
		if (!priv->caps.tso_ipv4) {
			DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
		if (!priv->caps.tso_ipv6) {
			DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
		if (!priv->caps.tx_csum) {
			DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		if (!priv->caps.rx_csum) {
			DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d -"
				" GUEST CSUM was not reported by HW/driver "
				"capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
		if (!priv->caps.virtio_version_1_0) {
			DRV_LOG(ERR, "Failed to enable version 1 for vdev %d -"
				" version 1 was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	return 0;
}

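/*
 * Prepare all virtqs of a device: validate the negotiated features, map
 * the doorbell page, create the transport domain and the TIS shared by
 * all virtqs, then set up every ring.
 */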
int
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_devx_tis_attr tis_attr = {0};
	uint32_t i;
	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);

	if (ret || mlx5_vdpa_features_validate(priv)) {
		DRV_LOG(ERR, "Failed to configure negotiated features.");
		return -1;
	}
	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
		DRV_LOG(ERR, "Cannot support more than %d virtqs (%d requested).",
			(int)priv->caps.max_num_virtio_queues * 2,
			(int)nr_vring);
		return -1;
	}
	/* Always map the entire page. */
	priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
				   PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd,
				   priv->var->mmap_off);
	if (priv->virtq_db_addr == MAP_FAILED) {
		DRV_LOG(ERR, "Failed to map doorbell page: errno %u.", errno);
		priv->virtq_db_addr = NULL;
		goto error;
	} else {
		DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
			priv->virtq_db_addr);
	}
	priv->td = mlx5_devx_cmd_create_td(priv->ctx);
	if (!priv->td) {
		DRV_LOG(ERR, "Failed to create transport domain.");
		goto error;
	}
	tis_attr.transport_domain = priv->td->id;
	priv->tis = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
	if (!priv->tis) {
		DRV_LOG(ERR, "Failed to create TIS.");
		goto error;
	}
	priv->nr_virtqs = nr_vring;
	for (i = 0; i < nr_vring; i++) {
		claim_zero(rte_vhost_enable_guest_notification(priv->vid, i,
							       1));
		if (mlx5_vdpa_virtq_setup(priv, i))
			goto error;
	}
	return 0;
error:
	mlx5_vdpa_virtqs_release(priv);
	return -1;
}

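/*
 * Enable or disable a single virtq. On an already configured device,
 * enabling recreates the virtq to pick up a possibly updated guest
 * configuration; for receive virtqs the steering RQT is updated as well.
 */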
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
	if (virtq->enable == !!enable)
		return 0;
	if (!priv->configured) {
		virtq->enable = !!enable;
		return 0;
	}
	if (enable) {
		/* Configuration might have been updated - reconfigure virtq. */
		if (virtq->virtq) {
			ret = mlx5_vdpa_virtq_stop(priv, index);
			if (ret)
				DRV_LOG(WARNING, "Failed to stop virtq %d.",
					index);
			mlx5_vdpa_virtq_unset(virtq);
		}
		ret = mlx5_vdpa_virtq_setup(priv, index);
		if (ret) {
			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
			/* The only case the virtq can stay invalid. */
			return ret;
		}
	}
	virtq->enable = !!enable;
	if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
		/* Need to add the receive virtq to the RQT table of the TIRs. */
		ret = mlx5_vdpa_steer_update(priv);
		if (ret) {
			virtq->enable = !enable;
			return ret;
		}
	}
	return 0;
}