/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _EVT_COMMON_
#define _EVT_COMMON_

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eventdev.h>
#include <rte_service.h>

#define CLNRM "\x1b[0m"
#define CLRED "\x1b[31m"
#define CLGRN "\x1b[32m"
#define CLYEL "\x1b[33m"

#define evt_err(fmt, args...) \
	fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)

#define evt_info(fmt, args...) \
	fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)

#define EVT_STR_FMT 20

#define evt_dump(str, fmt, val...) \
	printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)

#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)

#define evt_dump_end printf("\b}\n")

#define EVT_MAX_STAGES 64
#define EVT_MAX_PORTS 256
#define EVT_MAX_QUEUES 256

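/* Type of producer that feeds events into the event device under test. */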
enum evt_prod_type {
	EVT_PROD_TYPE_NONE,
	EVT_PROD_TYPE_SYNT,              /* Producer type Synthetic i.e. CPU. */
	EVT_PROD_TYPE_ETH_RX_ADPTR,      /* Producer type Eth Rx Adapter. */
	EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */
	EVT_PROD_TYPE_MAX,
};

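/* Options shared by all test-eventdev test cases. */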
struct evt_options {
#define EVT_TEST_NAME_MAX_LEN 32
	char test_name[EVT_TEST_NAME_MAX_LEN];
	bool plcores[RTE_MAX_LCORE];
	bool wlcores[RTE_MAX_LCORE];
	int pool_sz;
	int socket_id;
	int nb_stages;
	int verbose_level;
	uint8_t dev_id;
	uint8_t timdev_cnt;
	uint8_t nb_timer_adptrs;
	uint8_t timdev_use_burst;
	uint8_t sched_type_list[EVT_MAX_STAGES];
	uint16_t mbuf_sz;
	uint16_t wkr_deq_dep;
	uint32_t nb_flows;
	uint32_t tx_first;
	uint32_t max_pkt_sz;
	uint32_t deq_tmo_nsec;
	uint32_t q_priority:1;
	uint32_t fwd_latency:1;
	uint64_t nb_pkts;
	uint64_t nb_timers;
	uint64_t expiry_nsec;
	uint64_t max_tmo_nsec;
	uint64_t timer_tick_nsec;
	uint64_t optm_timer_tick_nsec;
	enum evt_prod_type prod_type;
};

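/* Return true if the event device reports RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED,
 * i.e. scheduling happens on the enqueuing/dequeuing lcores rather than on a
 * centralized scheduler.
 */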
static inline bool
evt_has_distributed_sched(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
			true : false;
}

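/* Return true if the event device supports burst enqueue/dequeue
 * (RTE_EVENT_DEV_CAP_BURST_MODE).
 */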
static inline bool
evt_has_burst_mode(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
			true : false;
}

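/* Return true if a single event queue can accept events of all schedule types
 * (RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES).
 */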
static inline bool
evt_has_all_types_queue(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?
			true : false;
}

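/* Map the given service to the service lcore that currently runs the fewest
 * services. Returns 0 on success, -ENOENT if no service lcore is available.
 */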
static inline int
evt_service_setup(uint32_t service_id)
{
	int32_t core_cnt;
	unsigned int lcore = 0;
	uint32_t core_array[RTE_MAX_LCORE];
	uint8_t cnt;
	uint8_t min_cnt = UINT8_MAX;

	if (!rte_service_lcore_count())
		return -ENOENT;

	core_cnt = rte_service_lcore_list(core_array,
			RTE_MAX_LCORE);
	if (core_cnt < 0)
		return -ENOENT;
	/* Pick the lcore that currently runs the fewest services. */
	while (core_cnt--) {
		/* Reset the default mapping. */
		rte_service_map_lcore_set(service_id,
				core_array[core_cnt], 0);
		cnt = rte_service_lcore_count_services(
				core_array[core_cnt]);
		if (cnt < min_cnt) {
			lcore = core_array[core_cnt];
			min_cnt = cnt;
		}
	}
	if (rte_service_map_lcore_set(service_id, lcore, 1))
		return -ENOENT;

	return 0;
}

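/* Configure the event device with nb_queues queues and nb_ports ports,
 * clamping the requested dequeue timeout to the device limits and using the
 * device maxima for the remaining resource limits.
 */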
static inline int
evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
		uint8_t nb_ports)
{
	struct rte_event_dev_info info;
	int ret;

	memset(&info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	if (opt->deq_tmo_nsec) {
		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
			evt_info("dequeue_timeout_ns too low, using %u",
					opt->deq_tmo_nsec);
		}
		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
			evt_info("dequeue_timeout_ns too high, using %u",
					opt->deq_tmo_nsec);
		}
	}

	const struct rte_event_dev_config config = {
		.dequeue_timeout_ns = opt->deq_tmo_nsec,
		.nb_event_queues = nb_queues,
		.nb_event_ports = nb_ports,
		.nb_events_limit = info.max_num_events,
		.nb_event_queue_flows = opt->nb_flows,
		.nb_event_port_dequeue_depth =
			info.max_event_port_dequeue_depth,
		.nb_event_port_enqueue_depth =
			info.max_event_port_enqueue_depth,
	};

	return rte_event_dev_configure(opt->dev_id, &config);
}

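/*
 * Illustrative usage (not part of the original header): a test's eventdev
 * setup would typically configure the device with evt_configure_eventdev()
 * and, on devices without distributed scheduling, bind the scheduler service
 * to a service lcore with evt_service_setup(). Hypothetical sketch:
 *
 *	struct evt_options *opt = ...;
 *	uint32_t service_id;
 *
 *	if (evt_configure_eventdev(opt, nb_queues, nb_ports) < 0)
 *		return -1;
 *	if (!evt_has_distributed_sched(opt->dev_id) &&
 *	    rte_event_dev_service_id_get(opt->dev_id, &service_id) == 0)
 *		(void)evt_service_setup(service_id);
 */
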
#endif /* _EVT_COMMON_ */