/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUX
#include <dirent.h>
#include <fcntl.h>
#endif

#include <rte_io.h>
#include <rte_bus.h>

#include "virtio_pci.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h; however,
 * we can't simply include that header here, as there is no such
 * file on non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) \
		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)

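/*
 * Per-device internal context, indexed by the cryptodev's dev_id. Its
 * vtpci_ops pointer is assigned in vtpci_cryptodev_init() and is what the
 * VTPCI_OPS() calls below dispatch through.
 */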
struct virtio_hw_internal crypto_virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bit,
	 * and only accepts 32 bit page frame numbers.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

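/*
 * The modern common config exposes 64-bit queue addresses as a pair of
 * 32-bit registers, so split the value and write the low half, then the
 * high half.
 */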
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}

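/*
 * Device-specific config reads are retried whenever config_generation
 * changes mid-read, so the caller always sees a consistent snapshot even
 * if the device updates its config concurrently.
 */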
static void
modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}

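/*
 * Feature bits are exposed through a 32-bit window: select word 0 or 1
 * with device_feature_select (guest_feature_select for writes), then
 * access device_feature/guest_feature.
 */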
static uint64_t
modern_get_features(struct virtio_crypto_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_crypto_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

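/*
 * Writing 0 (VIRTIO_CONFIG_STATUS_RESET) to the status register resets the
 * device; the read that follows flushes the posted write.
 */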
static void
modern_reset(struct virtio_crypto_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_crypto_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
		     uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

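/*
 * Program one virtqueue: the descriptor table, avail ring and used ring all
 * live in the single contiguous allocation at vq_ring_mem, with the used
 * ring aligned up to VIRTIO_PCI_VRING_ALIGN.
 */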
static int
modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
					ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
			notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t avail_addr: %" PRIx64, avail_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
			vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

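/* Kick the device: write the queue index to the notify address computed in
 * modern_setup_queue().
 */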
static void
modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
		    struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}

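/* Modern (virtio 1.0) device ops; registered in vtpci_cryptodev_init(). */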
const struct virtio_pci_ops virtio_crypto_modern_ops = {
	.read_dev_cfg = modern_read_dev_config,
	.write_dev_cfg = modern_write_dev_config,
	.reset = modern_reset,
	.get_status = modern_get_status,
	.set_status = modern_set_status,
	.get_features = modern_get_features,
	.set_features = modern_set_features,
	.get_isr = modern_get_isr,
	.set_config_irq = modern_set_config_irq,
	.set_queue_irq = modern_set_queue_irq,
	.get_queue_num = modern_get_queue_num,
	.setup_queue = modern_setup_queue,
	.del_queue = modern_del_queue,
	.notify_queue = modern_notify_queue,
};

void
vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
			    void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
			     const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
				   uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
{
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

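/*
 * Device status bits accumulate: unless resetting, OR the new bits into
 * whatever the device already reports before writing the status back.
 */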
void
vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

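/*
 * Translate a virtio PCI capability (bar/offset/length) into a host virtual
 * address inside the mapped BAR, rejecting out-of-range or overflowing
 * capabilities.
 */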
static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar >= PCI_MAX_RESOURCE) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		VIRTIO_CRYPTO_INIT_LOG_ERR(
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

#define PCI_MSIX_ENABLE 0x8000

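/*
 * Scan the PCI capability list: record whether MSI-X is enabled and map the
 * vendor-specific virtio capabilities (common, notify, device and ISR config
 * structures). Returns 0 only if all four were found, i.e. the device is a
 * modern virtio device.
 */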
static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_pci_map_device(dev)) {
		VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
		return -1;
	}

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list");
		return -1;
	}

	while (pos) {
		ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			VIRTIO_CRYPTO_INIT_LOG_ERR(
				"failed to read pci cap at pos: %x", pos);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
			/* Transitional devices would also have this capability,
			 * that's why we also check if msix is enabled.
			 * 1st byte is cap ID; 2nd byte is the position of next
			 * cap; next two bytes are the flags.
			 */
			uint16_t flags = ((uint16_t *)&cap)[1];

			if (flags & PCI_MSIX_ENABLE)
				hw->use_msix = VIRTIO_MSIX_ENABLED;
			else
				hw->use_msix = VIRTIO_MSIX_DISABLED;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			VIRTIO_CRYPTO_INIT_LOG_DBG(
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		VIRTIO_CRYPTO_INIT_LOG_DBG(
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			ret = rte_pci_read_config(dev, &hw->notify_off_multiplier,
					4, pos + sizeof(cap));
			if (ret != 4)
				VIRTIO_CRYPTO_INIT_LOG_ERR(
					"failed to read notify_off_multiplier: ret %d", ret);
			else
				hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
		return -1;
	}

	VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");

	VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
	VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
	VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
	VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping with VFIO/UIO.
 *   if there is a port map error when the driver type is KDRV_NONE.
 *   if the device is whitelisted but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
{
	/*
	 * Try to read the virtio PCI caps, which exist only on modern
	 * PCI devices. If that fails, we would normally fall back to
	 * legacy virtio handling.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
		crypto_virtio_hw_internal[hw->dev_id].vtpci_ops =
			&virtio_crypto_modern_ops;
		hw->modern = 1;
		return 0;
	}

	/*
	 * virtio crypto conforms to virtio 1.0 and doesn't support
	 * legacy mode, so there is nothing to fall back to here.
	 */
	return -1;
}