#ifndef CEPH_DPDK_DEV_H
#define CEPH_DPDK_DEV_H
-#include <memory>
#include <functional>
+#include <memory>
+#include <optional>
#include <rte_config.h>
#include <rte_common.h>
#include <rte_ethdev.h>
+#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_version.h>
#include "include/page.h"
-#include "common/Tub.h"
#include "common/perf_counters.h"
+#include "common/admin_socket.h"
#include "msg/async/Event.h"
#include "const.h"
#include "circular_buffer.h"
#include "net.h"
#include "toeplitz.h"
-
// Deleter for malloc()-allocated memory; lets std::unique_ptr (or any
// smart-pointer with a custom deleter) own buffers obtained from the C
// allocator instead of operator new.
struct free_deleter {
  void operator()(void* ptr) {
    ::free(ptr);
  }
};
class DPDKDevice;
class DPDKWorker;
+
+#ifndef MARKER
+typedef void *MARKER[0]; /**< generic marker for a point in a structure */
+#endif
+
class DPDKQueuePair {
- using packet_provider_type = std::function<Tub<Packet> ()>;
+ using packet_provider_type = std::function<std::optional<Packet> ()>;
public:
void configure_proxies(const std::map<unsigned, float>& cpu_weights);
// build REdirection TAble for cpu_weights map: target cpu -> weight
public:
tx_buf(tx_buf_factory& fc) : _fc(fc) {
- _buf_physaddr = _mbuf.buf_physaddr;
+ _buf_physaddr = _mbuf.buf_iova;
_data_off = _mbuf.data_off;
}
// Set the mbuf to point to our data
_mbuf.buf_addr = va;
- _mbuf.buf_physaddr = pa;
+ _mbuf.buf_iova = pa;
_mbuf.data_off = 0;
_is_zc = true;
}
// to call the "packet"'s destructor and reset the
// "optional" state to "disengaged".
//
- _p.destroy();
+ _p.reset();
} else if (!_is_zc) {
return;
}
// Restore the rte_mbuf fields we trashed in set_zc_info()
- _mbuf.buf_physaddr = _buf_physaddr;
+ _mbuf.buf_iova = _buf_physaddr;
_mbuf.buf_addr = rte_mbuf_to_baddr(&_mbuf);
_mbuf.data_off = _data_off;
private:
struct rte_mbuf _mbuf;
MARKER private_start;
- Tub<Packet> _p;
+ std::optional<Packet> _p;
phys_addr_t _buf_physaddr;
uint16_t _data_off;
// TRUE if underlying mbuf has been used in the zero-copy flow
_ring.push_back(buf);
}
+ unsigned ring_size() const {
+ return _ring.size();
+ }
+
bool gc() {
for (int cnt = 0; cnt < gc_count; ++cnt) {
auto tx_buf_p = get_one_completed();
}
void rx_start() {
- _rx_poller.construct(this);
+ _rx_poller.emplace(this);
}
uint32_t send(circular_buffer<Packet>& pb) {
// actual data buffer.
//
m->buf_addr = (char*)data - RTE_PKTMBUF_HEADROOM;
- m->buf_physaddr = rte_mem_virt2phy(data) - RTE_PKTMBUF_HEADROOM;
+ m->buf_iova = rte_mem_virt2iova(data) - RTE_PKTMBUF_HEADROOM;
return true;
}
* @return an "optional" object representing the newly received data if in an
* "engaged" state or an error if in a "disengaged" state.
*/
- Tub<Packet> from_mbuf(rte_mbuf* m);
+ std::optional<Packet> from_mbuf(rte_mbuf* m);
/**
* Transform an LRO rte_mbuf cluster into the "packet" object.
* @return an "optional" object representing the newly received LRO packet if
* in an "engaged" state or an error if in a "disengaged" state.
*/
- Tub<Packet> from_mbuf_lro(rte_mbuf* m);
+ std::optional<Packet> from_mbuf_lro(rte_mbuf* m);
private:
CephContext *cct;
std::vector<packet_provider_type> _pkt_providers;
- Tub<std::array<uint8_t, 128>> _sw_reta;
+ std::optional<std::array<uint8_t, 128>> _sw_reta;
circular_buffer<Packet> _proxy_packetq;
stream<Packet> _rx_stream;
circular_buffer<Packet> _tx_packetq;
return qp->poll_rx_once();
}
};
- Tub<DPDKRXPoller> _rx_poller;
+ std::optional<DPDKRXPoller> _rx_poller;
class DPDKTXGCPoller : public EventCenter::Poller {
DPDKQueuePair *qp;
struct rte_flow *_flow = nullptr;
bool _is_i40e_device = false;
bool _is_vmxnet3_device = false;
+ std::unique_ptr<AdminSocketHook> dfx_hook;
public:
rte_eth_dev_info _dev_info = {};
*/
int init_port_fini();
+ void nic_stats_dump(Formatter *f);
+ void nic_xstats_dump(Formatter *f);
private:
/**
* Port initialization consists of 3 main stages:
/* now initialise the port we will use */
int ret = init_port_start();
if (ret != 0) {
- rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n", _port_idx);
+ ceph_assert(false && "Cannot initialise port\n");
}
- string name(std::string("port") + std::to_string(port_idx));
+ std::string name(std::string("port") + std::to_string(port_idx));
PerfCountersBuilder plb(cct, name, l_dpdk_dev_first, l_dpdk_dev_last);
plb.add_u64_counter(l_dpdk_dev_rx_mcast, "dpdk_device_receive_multicast_packets", "DPDK received multicast packets");
}
~DPDKDevice() {
+ cct->get_admin_socket()->unregister_commands(dfx_hook.get());
+ dfx_hook.reset();
if (_flow)
rte_flow_destroy(_port_idx, _flow, nullptr);
rte_eth_dev_stop(_port_idx);
return sub;
}
ethernet_address hw_address() {
- struct ether_addr mac;
+ struct rte_ether_addr mac;
rte_eth_macaddr_get(_port_idx, &mac);
return mac.addr_bytes;
}
const rss_key_type& rss_key() const { return _rss_key; }
uint16_t hw_queues_count() { return _num_queues; }
- std::unique_ptr<DPDKQueuePair> init_local_queue(CephContext *c, EventCenter *center, string hugepages, uint16_t qid) {
+ std::unique_ptr<DPDKQueuePair> init_local_queue(CephContext *c,
+ EventCenter *center, std::string hugepages, uint16_t qid) {
std::unique_ptr<DPDKQueuePair> qp;
qp = std::unique_ptr<DPDKQueuePair>(new DPDKQueuePair(c, center, this, qid));
return qp;