/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
7 #ifdef RTE_EXEC_ENV_LINUX
15 #include "virtio_pci.h"
16 #include "virtqueue.h"
/*
 * Following macros are derived from linux/pci_regs.h, however,
 * we can't simply include that header here, as there is no such
 * file for non-Linux platforms.
 */
23 #define PCI_CAPABILITY_LIST 0x34
24 #define PCI_CAP_ID_VNDR 0x09
25 #define PCI_CAP_ID_MSIX 0x11
/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
31 #define VIRTIO_PCI_CONFIG(hw) \
32 (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
34 struct virtio_hw_internal crypto_virtio_hw_internal
[RTE_MAX_VIRTIO_CRYPTO
];
37 check_vq_phys_addr_ok(struct virtqueue
*vq
)
39 /* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
40 * and only accepts 32 bit page frame number.
41 * Check if the allocated physical memory exceeds 16TB.
43 if ((vq
->vq_ring_mem
+ vq
->vq_ring_size
- 1) >>
44 (VIRTIO_PCI_QUEUE_ADDR_SHIFT
+ 32)) {
45 VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
53 io_write64_twopart(uint64_t val
, uint32_t *lo
, uint32_t *hi
)
55 rte_write32(val
& ((1ULL << 32) - 1), lo
);
56 rte_write32(val
>> 32, hi
);
60 modern_read_dev_config(struct virtio_crypto_hw
*hw
, size_t offset
,
61 void *dst
, int length
)
65 uint8_t old_gen
, new_gen
;
68 old_gen
= rte_read8(&hw
->common_cfg
->config_generation
);
71 for (i
= 0; i
< length
; i
++)
72 *p
++ = rte_read8((uint8_t *)hw
->dev_cfg
+ offset
+ i
);
74 new_gen
= rte_read8(&hw
->common_cfg
->config_generation
);
75 } while (old_gen
!= new_gen
);
79 modern_write_dev_config(struct virtio_crypto_hw
*hw
, size_t offset
,
80 const void *src
, int length
)
83 const uint8_t *p
= src
;
85 for (i
= 0; i
< length
; i
++)
86 rte_write8((*p
++), (((uint8_t *)hw
->dev_cfg
) + offset
+ i
));
90 modern_get_features(struct virtio_crypto_hw
*hw
)
92 uint32_t features_lo
, features_hi
;
94 rte_write32(0, &hw
->common_cfg
->device_feature_select
);
95 features_lo
= rte_read32(&hw
->common_cfg
->device_feature
);
97 rte_write32(1, &hw
->common_cfg
->device_feature_select
);
98 features_hi
= rte_read32(&hw
->common_cfg
->device_feature
);
100 return ((uint64_t)features_hi
<< 32) | features_lo
;
104 modern_set_features(struct virtio_crypto_hw
*hw
, uint64_t features
)
106 rte_write32(0, &hw
->common_cfg
->guest_feature_select
);
107 rte_write32(features
& ((1ULL << 32) - 1),
108 &hw
->common_cfg
->guest_feature
);
110 rte_write32(1, &hw
->common_cfg
->guest_feature_select
);
111 rte_write32(features
>> 32,
112 &hw
->common_cfg
->guest_feature
);
116 modern_get_status(struct virtio_crypto_hw
*hw
)
118 return rte_read8(&hw
->common_cfg
->device_status
);
122 modern_set_status(struct virtio_crypto_hw
*hw
, uint8_t status
)
124 rte_write8(status
, &hw
->common_cfg
->device_status
);
128 modern_reset(struct virtio_crypto_hw
*hw
)
130 modern_set_status(hw
, VIRTIO_CONFIG_STATUS_RESET
);
131 modern_get_status(hw
);
135 modern_get_isr(struct virtio_crypto_hw
*hw
)
137 return rte_read8(hw
->isr
);
141 modern_set_config_irq(struct virtio_crypto_hw
*hw
, uint16_t vec
)
143 rte_write16(vec
, &hw
->common_cfg
->msix_config
);
144 return rte_read16(&hw
->common_cfg
->msix_config
);
148 modern_set_queue_irq(struct virtio_crypto_hw
*hw
, struct virtqueue
*vq
,
151 rte_write16(vq
->vq_queue_index
, &hw
->common_cfg
->queue_select
);
152 rte_write16(vec
, &hw
->common_cfg
->queue_msix_vector
);
153 return rte_read16(&hw
->common_cfg
->queue_msix_vector
);
157 modern_get_queue_num(struct virtio_crypto_hw
*hw
, uint16_t queue_id
)
159 rte_write16(queue_id
, &hw
->common_cfg
->queue_select
);
160 return rte_read16(&hw
->common_cfg
->queue_size
);
164 modern_setup_queue(struct virtio_crypto_hw
*hw
, struct virtqueue
*vq
)
166 uint64_t desc_addr
, avail_addr
, used_addr
;
169 if (!check_vq_phys_addr_ok(vq
))
172 desc_addr
= vq
->vq_ring_mem
;
173 avail_addr
= desc_addr
+ vq
->vq_nentries
* sizeof(struct vring_desc
);
174 used_addr
= RTE_ALIGN_CEIL(avail_addr
+ offsetof(struct vring_avail
,
175 ring
[vq
->vq_nentries
]),
176 VIRTIO_PCI_VRING_ALIGN
);
178 rte_write16(vq
->vq_queue_index
, &hw
->common_cfg
->queue_select
);
180 io_write64_twopart(desc_addr
, &hw
->common_cfg
->queue_desc_lo
,
181 &hw
->common_cfg
->queue_desc_hi
);
182 io_write64_twopart(avail_addr
, &hw
->common_cfg
->queue_avail_lo
,
183 &hw
->common_cfg
->queue_avail_hi
);
184 io_write64_twopart(used_addr
, &hw
->common_cfg
->queue_used_lo
,
185 &hw
->common_cfg
->queue_used_hi
);
187 notify_off
= rte_read16(&hw
->common_cfg
->queue_notify_off
);
188 vq
->notify_addr
= (void *)((uint8_t *)hw
->notify_base
+
189 notify_off
* hw
->notify_off_multiplier
);
191 rte_write16(1, &hw
->common_cfg
->queue_enable
);
193 VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq
->vq_queue_index
);
194 VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64
, desc_addr
);
195 VIRTIO_CRYPTO_INIT_LOG_DBG("\t aval_addr: %" PRIx64
, avail_addr
);
196 VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64
, used_addr
);
197 VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
198 vq
->notify_addr
, notify_off
);
204 modern_del_queue(struct virtio_crypto_hw
*hw
, struct virtqueue
*vq
)
206 rte_write16(vq
->vq_queue_index
, &hw
->common_cfg
->queue_select
);
208 io_write64_twopart(0, &hw
->common_cfg
->queue_desc_lo
,
209 &hw
->common_cfg
->queue_desc_hi
);
210 io_write64_twopart(0, &hw
->common_cfg
->queue_avail_lo
,
211 &hw
->common_cfg
->queue_avail_hi
);
212 io_write64_twopart(0, &hw
->common_cfg
->queue_used_lo
,
213 &hw
->common_cfg
->queue_used_hi
);
215 rte_write16(0, &hw
->common_cfg
->queue_enable
);
219 modern_notify_queue(struct virtio_crypto_hw
*hw __rte_unused
,
220 struct virtqueue
*vq
)
222 rte_write16(vq
->vq_queue_index
, vq
->notify_addr
);
225 const struct virtio_pci_ops virtio_crypto_modern_ops
= {
226 .read_dev_cfg
= modern_read_dev_config
,
227 .write_dev_cfg
= modern_write_dev_config
,
228 .reset
= modern_reset
,
229 .get_status
= modern_get_status
,
230 .set_status
= modern_set_status
,
231 .get_features
= modern_get_features
,
232 .set_features
= modern_set_features
,
233 .get_isr
= modern_get_isr
,
234 .set_config_irq
= modern_set_config_irq
,
235 .set_queue_irq
= modern_set_queue_irq
,
236 .get_queue_num
= modern_get_queue_num
,
237 .setup_queue
= modern_setup_queue
,
238 .del_queue
= modern_del_queue
,
239 .notify_queue
= modern_notify_queue
,
243 vtpci_read_cryptodev_config(struct virtio_crypto_hw
*hw
, size_t offset
,
244 void *dst
, int length
)
246 VTPCI_OPS(hw
)->read_dev_cfg(hw
, offset
, dst
, length
);
250 vtpci_write_cryptodev_config(struct virtio_crypto_hw
*hw
, size_t offset
,
251 const void *src
, int length
)
253 VTPCI_OPS(hw
)->write_dev_cfg(hw
, offset
, src
, length
);
257 vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw
*hw
,
258 uint64_t host_features
)
263 * Limit negotiated features to what the driver, virtqueue, and
266 features
= host_features
& hw
->guest_features
;
267 VTPCI_OPS(hw
)->set_features(hw
, features
);
273 vtpci_cryptodev_reset(struct virtio_crypto_hw
*hw
)
275 VTPCI_OPS(hw
)->set_status(hw
, VIRTIO_CONFIG_STATUS_RESET
);
276 /* flush status write */
277 VTPCI_OPS(hw
)->get_status(hw
);
281 vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw
*hw
)
283 vtpci_cryptodev_set_status(hw
, VIRTIO_CONFIG_STATUS_DRIVER_OK
);
287 vtpci_cryptodev_set_status(struct virtio_crypto_hw
*hw
, uint8_t status
)
289 if (status
!= VIRTIO_CONFIG_STATUS_RESET
)
290 status
|= VTPCI_OPS(hw
)->get_status(hw
);
292 VTPCI_OPS(hw
)->set_status(hw
, status
);
296 vtpci_cryptodev_get_status(struct virtio_crypto_hw
*hw
)
298 return VTPCI_OPS(hw
)->get_status(hw
);
302 vtpci_cryptodev_isr(struct virtio_crypto_hw
*hw
)
304 return VTPCI_OPS(hw
)->get_isr(hw
);
308 get_cfg_addr(struct rte_pci_device
*dev
, struct virtio_pci_cap
*cap
)
310 uint8_t bar
= cap
->bar
;
311 uint32_t length
= cap
->length
;
312 uint32_t offset
= cap
->offset
;
315 if (bar
>= PCI_MAX_RESOURCE
) {
316 VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar
);
320 if (offset
+ length
< offset
) {
321 VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
326 if (offset
+ length
> dev
->mem_resource
[bar
].len
) {
327 VIRTIO_CRYPTO_INIT_LOG_ERR(
328 "invalid cap: overflows bar space: %u > %" PRIu64
,
329 offset
+ length
, dev
->mem_resource
[bar
].len
);
333 base
= dev
->mem_resource
[bar
].addr
;
335 VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar
);
339 return base
+ offset
;
342 #define PCI_MSIX_ENABLE 0x8000
345 virtio_read_caps(struct rte_pci_device
*dev
, struct virtio_crypto_hw
*hw
)
348 struct virtio_pci_cap cap
;
351 if (rte_pci_map_device(dev
)) {
352 VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
356 ret
= rte_pci_read_config(dev
, &pos
, 1, PCI_CAPABILITY_LIST
);
358 VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list");
363 ret
= rte_pci_read_config(dev
, &cap
, sizeof(cap
), pos
);
365 VIRTIO_CRYPTO_INIT_LOG_ERR(
366 "failed to read pci cap at pos: %x", pos
);
370 if (cap
.cap_vndr
== PCI_CAP_ID_MSIX
) {
371 /* Transitional devices would also have this capability,
372 * that's why we also check if msix is enabled.
373 * 1st byte is cap ID; 2nd byte is the position of next
374 * cap; next two bytes are the flags.
376 uint16_t flags
= ((uint16_t *)&cap
)[1];
378 if (flags
& PCI_MSIX_ENABLE
)
379 hw
->use_msix
= VIRTIO_MSIX_ENABLED
;
381 hw
->use_msix
= VIRTIO_MSIX_DISABLED
;
384 if (cap
.cap_vndr
!= PCI_CAP_ID_VNDR
) {
385 VIRTIO_CRYPTO_INIT_LOG_DBG(
386 "[%2x] skipping non VNDR cap id: %02x",
391 VIRTIO_CRYPTO_INIT_LOG_DBG(
392 "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
393 pos
, cap
.cfg_type
, cap
.bar
, cap
.offset
, cap
.length
);
395 switch (cap
.cfg_type
) {
396 case VIRTIO_PCI_CAP_COMMON_CFG
:
397 hw
->common_cfg
= get_cfg_addr(dev
, &cap
);
399 case VIRTIO_PCI_CAP_NOTIFY_CFG
:
400 ret
= rte_pci_read_config(dev
, &hw
->notify_off_multiplier
,
401 4, pos
+ sizeof(cap
));
403 VIRTIO_CRYPTO_INIT_LOG_ERR(
404 "failed to read notify_off_multiplier: ret %d", ret
);
406 hw
->notify_base
= get_cfg_addr(dev
, &cap
);
408 case VIRTIO_PCI_CAP_DEVICE_CFG
:
409 hw
->dev_cfg
= get_cfg_addr(dev
, &cap
);
411 case VIRTIO_PCI_CAP_ISR_CFG
:
412 hw
->isr
= get_cfg_addr(dev
, &cap
);
420 if (hw
->common_cfg
== NULL
|| hw
->notify_base
== NULL
||
421 hw
->dev_cfg
== NULL
|| hw
->isr
== NULL
) {
422 VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
426 VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");
428 VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw
->common_cfg
);
429 VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw
->dev_cfg
);
430 VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw
->isr
);
431 VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
432 hw
->notify_base
, hw
->notify_off_multiplier
);
/*
 * Return -1:
 *   if there is error mapping with VFIO/UIO.
 *   if port map error when driver type is KDRV_NONE.
 *   if whitelisted but driver type is KDRV_UNKNOWN.
 * Return 1 if kernel driver is managing the device.
 * Return 0 on success.
 */
446 vtpci_cryptodev_init(struct rte_pci_device
*dev
, struct virtio_crypto_hw
*hw
)
449 * Try if we can succeed reading virtio pci caps, which exists
450 * only on modern pci device. If failed, we fallback to legacy
453 if (virtio_read_caps(dev
, hw
) == 0) {
454 VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
455 crypto_virtio_hw_internal
[hw
->dev_id
].vtpci_ops
=
456 &virtio_crypto_modern_ops
;
462 * virtio crypto conforms to virtio 1.0 and doesn't support