]>
Commit | Line | Data |
---|---|---|
11fdf7f2 TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * | |
3 | * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. | |
f67539c2 | 4 | * Copyright 2017-2019 NXP |
11fdf7f2 TL |
5 | * |
6 | */ | |
7 | /* System headers */ | |
8 | #include <stdio.h> | |
9 | #include <inttypes.h> | |
10 | #include <unistd.h> | |
11 | #include <limits.h> | |
12 | #include <sched.h> | |
13 | #include <signal.h> | |
14 | #include <pthread.h> | |
15 | #include <sys/types.h> | |
16 | #include <sys/syscall.h> | |
17 | ||
9f95a23c | 18 | #include <rte_string_fns.h> |
11fdf7f2 TL |
19 | #include <rte_byteorder.h> |
20 | #include <rte_common.h> | |
21 | #include <rte_interrupts.h> | |
22 | #include <rte_log.h> | |
23 | #include <rte_debug.h> | |
24 | #include <rte_pci.h> | |
25 | #include <rte_atomic.h> | |
26 | #include <rte_branch_prediction.h> | |
27 | #include <rte_memory.h> | |
28 | #include <rte_tailq.h> | |
29 | #include <rte_eal.h> | |
30 | #include <rte_alarm.h> | |
31 | #include <rte_ether.h> | |
32 | #include <rte_ethdev_driver.h> | |
33 | #include <rte_malloc.h> | |
34 | #include <rte_ring.h> | |
35 | ||
36 | #include <rte_dpaa_bus.h> | |
37 | #include <rte_dpaa_logs.h> | |
38 | #include <dpaa_mempool.h> | |
39 | ||
40 | #include <dpaa_ethdev.h> | |
41 | #include <dpaa_rxtx.h> | |
42 | #include <rte_pmd_dpaa.h> | |
43 | ||
44 | #include <fsl_usd.h> | |
45 | #include <fsl_qman.h> | |
46 | #include <fsl_bman.h> | |
47 | #include <fsl_fman.h> | |
48 | ||
f67539c2 TL |
/* Log type identifier for this PMD (registration not visible here). */
int dpaa_logtype_pmd;

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER;

/* Rx offloads which cannot be disabled (always on in hardware) */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_RSS_HASH;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled (always on in hardware) */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_MULTI_SEGS;

/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
static int default_q;	/* use default queue - FMC is not executed*/
/* At present we only allow up to 4 push mode queues as default - as each of
 * this queue need dedicated portal and we are short of portals.
 */
#define DPAA_MAX_PUSH_MODE_QUEUE       8
#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4

/* Runtime cap on how many Rx queues use push (dedicated-portal) mode. */
static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx;	/* Queue index which are in push mode*/


/* Per FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
93 | ||
/* Maps an xstat display name to the byte offset of its counter inside
 * struct dpaa_if_stats.
 */
struct rte_dpaa_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};
98 | ||
99 | static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = { | |
100 | {"rx_align_err", | |
101 | offsetof(struct dpaa_if_stats, raln)}, | |
102 | {"rx_valid_pause", | |
103 | offsetof(struct dpaa_if_stats, rxpf)}, | |
104 | {"rx_fcs_err", | |
105 | offsetof(struct dpaa_if_stats, rfcs)}, | |
106 | {"rx_vlan_frame", | |
107 | offsetof(struct dpaa_if_stats, rvlan)}, | |
108 | {"rx_frame_err", | |
109 | offsetof(struct dpaa_if_stats, rerr)}, | |
110 | {"rx_drop_err", | |
111 | offsetof(struct dpaa_if_stats, rdrp)}, | |
112 | {"rx_undersized", | |
113 | offsetof(struct dpaa_if_stats, rund)}, | |
114 | {"rx_oversize_err", | |
115 | offsetof(struct dpaa_if_stats, rovr)}, | |
116 | {"rx_fragment_pkt", | |
117 | offsetof(struct dpaa_if_stats, rfrg)}, | |
118 | {"tx_valid_pause", | |
119 | offsetof(struct dpaa_if_stats, txpf)}, | |
120 | {"tx_fcs_err", | |
121 | offsetof(struct dpaa_if_stats, terr)}, | |
122 | {"tx_vlan_frame", | |
123 | offsetof(struct dpaa_if_stats, tvlan)}, | |
124 | {"rx_undersized", | |
125 | offsetof(struct dpaa_if_stats, tund)}, | |
126 | }; | |
127 | ||
/* Driver object; filled in further down the file. */
static struct rte_dpaa_driver rte_dpaa_pmd;

/* Forward declaration: dpaa_eth_dev_info is defined later in this file. */
static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
132 | ||
/*
 * Fill @opts with the default INITFQ settings for a poll-mode Rx queue:
 * avoid-block scheduling, prefer-in-cache, and context-A stashing of
 * the annotation, data and context cachelines.
 */
static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
	memset(opts, 0, sizeof(struct qm_mcc_initfq));
	opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
	opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			   QM_FQCTRL_PREFERINCACHE;
	opts->fqd.context_a.stashing.exclusive = 0;
	/* Annotation stashing is skipped on LS1046A (it is a bottleneck
	 * there; see the same policy in the push-mode Rx setup).
	 */
	if (dpaa_svr_family != SVR_LS1046A_FAMILY)
		opts->fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
	opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
	opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
}
147 | ||
/*
 * Set a new MTU on the port.
 *
 * The MTU is converted to a maximum frame size (Ethernet header + CRC +
 * one VLAN tag), validated against the hardware limit and against the
 * mbuf data-room size when scattered Rx is not enabled, the jumbo
 * offload flag is kept in sync, and the FMan max frame length is
 * programmed.
 *
 * Returns 0 on success, -EINVAL when the resulting frame cannot be
 * received with the current configuration.
 */
static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;
	uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
		return -EINVAL;
	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
		!dev->data->scattered_rx && frame_size > buffsz) {
		DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
		(frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
		DPAA_PMD_ERR("Too big to fit for Max SG list %d",
				buffsz * DPAA_SGT_MAX_ENTRIES);
		return -EINVAL;
	}

	/* Keep the jumbo offload flag consistent with the new frame size. */
	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	fman_if_set_maxfrm(dpaa_intf->fif, frame_size);

	return 0;
}
191 | ||
/*
 * Apply the port configuration.
 *
 * Logs (but does not fail on) requests that omit offloads the hardware
 * keeps enabled regardless, programs the jumbo frame length capped at
 * DPAA_MAX_RX_PKT_LEN, and enables scatter-gather Rx in the FMan when
 * requested.  Always returns 0.
 */
static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA_PMD_INFO(
		"Some of rx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA_PMD_INFO(
		"Some of tx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		DPAA_PMD_DEBUG("enabling jumbo");

		/* Clamp the requested length to the hardware maximum. */
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
		    DPAA_MAX_RX_PKT_LEN)
			max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
		else {
			DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
				"supported is %d",
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				DPAA_MAX_RX_PKT_LEN);
			max_len = DPAA_MAX_RX_PKT_LEN;
		}

		fman_if_set_maxfrm(dpaa_intf->fif, max_len);
		/* Derive the MTU back from the programmed frame size. */
		dev->data->mtu = max_len
			- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
		DPAA_PMD_DEBUG("enabling scatter mode");
		fman_if_set_sg(dpaa_intf->fif, 1);
		dev->data->scattered_rx = 1;
	}

	return 0;
}
247 | ||
248 | static const uint32_t * | |
249 | dpaa_supported_ptypes_get(struct rte_eth_dev *dev) | |
250 | { | |
251 | static const uint32_t ptypes[] = { | |
11fdf7f2 | 252 | RTE_PTYPE_L2_ETHER, |
9f95a23c TL |
253 | RTE_PTYPE_L2_ETHER_VLAN, |
254 | RTE_PTYPE_L2_ETHER_ARP, | |
255 | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, | |
256 | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, | |
257 | RTE_PTYPE_L4_ICMP, | |
258 | RTE_PTYPE_L4_TCP, | |
259 | RTE_PTYPE_L4_UDP, | |
260 | RTE_PTYPE_L4_FRAG, | |
11fdf7f2 TL |
261 | RTE_PTYPE_L4_TCP, |
262 | RTE_PTYPE_L4_UDP, | |
263 | RTE_PTYPE_L4_SCTP | |
264 | }; | |
265 | ||
266 | PMD_INIT_FUNC_TRACE(); | |
267 | ||
268 | if (dev->rx_pkt_burst == dpaa_eth_queue_rx) | |
269 | return ptypes; | |
270 | return NULL; | |
271 | } | |
272 | ||
273 | static int dpaa_eth_dev_start(struct rte_eth_dev *dev) | |
274 | { | |
275 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
276 | ||
277 | PMD_INIT_FUNC_TRACE(); | |
278 | ||
279 | /* Change tx callback to the real one */ | |
280 | dev->tx_pkt_burst = dpaa_eth_queue_tx; | |
281 | fman_if_enable_rx(dpaa_intf->fif); | |
282 | ||
283 | return 0; | |
284 | } | |
285 | ||
286 | static void dpaa_eth_dev_stop(struct rte_eth_dev *dev) | |
287 | { | |
288 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
289 | ||
290 | PMD_INIT_FUNC_TRACE(); | |
291 | ||
292 | fman_if_disable_rx(dpaa_intf->fif); | |
293 | dev->tx_pkt_burst = dpaa_eth_tx_drop_all; | |
294 | } | |
295 | ||
/* Close the port; currently equivalent to stopping it. */
static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
}
302 | ||
/*
 * Format the firmware/silicon version as "SVR:<svr>-fman-v<rev>".
 *
 * Reads the SVR from the SoC id file; as a side effect it refreshes the
 * global dpaa_svr_family when the read succeeds.
 *
 * Returns 0 on success, the required buffer size (including the
 * terminating NUL) when @fw_size is too small, or -ENOTSUP when the SoC
 * id file cannot be opened.
 */
static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (!svr_file) {
		DPAA_PMD_ERR("Unable to open SoC device");
		return -ENOTSUP; /* Not supported on this infra */
	}
	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		dpaa_svr_family = svr_ver & SVR_MASK;
	else
		DPAA_PMD_ERR("Unable to read SoC device");

	fclose(svr_file);

	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
		       svr_ver, fman_ip_rev);
	ret += 1; /* add the size of '\0' */

	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}
335 | ||
f67539c2 TL |
/*
 * Populate @dev_info with the static capabilities of this port: queue
 * counts, frame-size and MAC-filter limits, RSS offloads, link speed
 * capability derived from the FMan MAC type, offload capabilities, and
 * recommended default queue/burst/ring parameters.
 *
 * Returns 0 on success, -EINVAL for an unrecognized MAC type.
 */
static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	DPAA_PMD_DEBUG(": %s", dpaa_intf->name);

	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;

	if (dpaa_intf->fif->mac_type == fman_mac_1g) {
		dev_info->speed_capa = ETH_LINK_SPEED_1G;
	} else if (dpaa_intf->fif->mac_type == fman_mac_10g) {
		/* A 10G MAC can also run at 1G. */
		dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
	} else {
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, dpaa_intf->fif->mac_type);
		return -EINVAL;
	}

	/* Advertise the always-on offloads as capabilities too. */
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
	dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
	dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;

	return 0;
}
375 | ||
/*
 * Report link state.  Speed is derived from the FMan MAC type and the
 * "up" status mirrors the interface's valid flag; the hardware is not
 * polled, so wait_to_complete is ignored.
 */
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_link *link = &dev->data->dev_link;

	PMD_INIT_FUNC_TRACE();

	if (dpaa_intf->fif->mac_type == fman_mac_1g)
		link->link_speed = ETH_SPEED_NUM_1G;
	else if (dpaa_intf->fif->mac_type == fman_mac_10g)
		link->link_speed = ETH_SPEED_NUM_10G;
	else
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, dpaa_intf->fif->mac_type);

	link->link_status = dpaa_intf->valid;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_autoneg = ETH_LINK_AUTONEG;
	return 0;
}
397 | ||
398 | static int dpaa_eth_stats_get(struct rte_eth_dev *dev, | |
399 | struct rte_eth_stats *stats) | |
400 | { | |
401 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
402 | ||
403 | PMD_INIT_FUNC_TRACE(); | |
404 | ||
405 | fman_if_stats_get(dpaa_intf->fif, stats); | |
406 | return 0; | |
407 | } | |
408 | ||
f67539c2 | 409 | static int dpaa_eth_stats_reset(struct rte_eth_dev *dev) |
11fdf7f2 TL |
410 | { |
411 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
412 | ||
413 | PMD_INIT_FUNC_TRACE(); | |
414 | ||
415 | fman_if_stats_reset(dpaa_intf->fif); | |
f67539c2 TL |
416 | |
417 | return 0; | |
11fdf7f2 TL |
418 | } |
419 | ||
/*
 * Fetch all extended statistics.
 *
 * Follows the standard xstats contract: if @n is smaller than the table
 * size the required count is returned without filling anything, and a
 * NULL @xstats returns 0.  Otherwise every counter named in
 * dpaa_xstats_strings is copied out of the FMan counter block.
 */
static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int n)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
	/* One slot per 64-bit counter in the hardware stats block. */
	uint64_t values[sizeof(struct dpaa_if_stats) / 8];

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	fman_if_stats_get_all(dpaa_intf->fif, values,
			      sizeof(struct dpaa_if_stats) / 8);

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
	}
	return i;
}
443 | ||
/*
 * Report the names of all extended statistics.  Returns the table size;
 * names are only copied when @xstats_names is non-NULL and @limit is
 * large enough to hold them all.
 */
static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			strlcpy(xstats_names[i].name,
				dpaa_xstats_strings[i].name,
				sizeof(xstats_names[i].name));

	return stat_cnt;
}
462 | ||
/*
 * Fetch selected extended statistics by id.
 *
 * With @ids == NULL this behaves like a full dump (returning the table
 * size if @n is too small).  Otherwise it snapshots all counters via a
 * recursive NULL-ids call, then copies out the @n requested entries,
 * returning -1 if any id is out of range.
 */
static int
dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		      uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];

	if (!ids) {
		struct dpaa_if *dpaa_intf = dev->data->dev_private;

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		fman_if_stats_get_all(dpaa_intf->fif, values_copy,
				      sizeof(struct dpaa_if_stats) / 8);

		for (i = 0; i < stat_cnt; i++)
			values[i] =
				values_copy[dpaa_xstats_strings[i].offset / 8];

		return stat_cnt;
	}

	/* Snapshot every counter, then pick out the requested ids. */
	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}
500 | ||
501 | static int | |
502 | dpaa_xstats_get_names_by_id( | |
503 | struct rte_eth_dev *dev, | |
504 | struct rte_eth_xstat_name *xstats_names, | |
505 | const uint64_t *ids, | |
506 | unsigned int limit) | |
507 | { | |
508 | unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); | |
509 | struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; | |
510 | ||
511 | if (!ids) | |
512 | return dpaa_xstats_get_names(dev, xstats_names, limit); | |
513 | ||
514 | dpaa_xstats_get_names(dev, xstats_names_copy, limit); | |
515 | ||
516 | for (i = 0; i < limit; i++) { | |
517 | if (ids[i] >= stat_cnt) { | |
518 | DPAA_PMD_ERR("id value isn't valid"); | |
519 | return -1; | |
520 | } | |
521 | strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); | |
522 | } | |
523 | return limit; | |
524 | } | |
525 | ||
f67539c2 | 526 | static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev) |
11fdf7f2 TL |
527 | { |
528 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
529 | ||
530 | PMD_INIT_FUNC_TRACE(); | |
531 | ||
532 | fman_if_promiscuous_enable(dpaa_intf->fif); | |
f67539c2 TL |
533 | |
534 | return 0; | |
11fdf7f2 TL |
535 | } |
536 | ||
f67539c2 | 537 | static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev) |
11fdf7f2 TL |
538 | { |
539 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
540 | ||
541 | PMD_INIT_FUNC_TRACE(); | |
542 | ||
543 | fman_if_promiscuous_disable(dpaa_intf->fif); | |
f67539c2 TL |
544 | |
545 | return 0; | |
11fdf7f2 TL |
546 | } |
547 | ||
f67539c2 | 548 | static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev) |
11fdf7f2 TL |
549 | { |
550 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
551 | ||
552 | PMD_INIT_FUNC_TRACE(); | |
553 | ||
554 | fman_if_set_mcast_filter_table(dpaa_intf->fif); | |
f67539c2 TL |
555 | |
556 | return 0; | |
11fdf7f2 TL |
557 | } |
558 | ||
f67539c2 | 559 | static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev) |
11fdf7f2 TL |
560 | { |
561 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
562 | ||
563 | PMD_INIT_FUNC_TRACE(); | |
564 | ||
565 | fman_if_reset_mcast_filter_table(dpaa_intf->fif); | |
f67539c2 TL |
566 | |
567 | return 0; | |
11fdf7f2 TL |
568 | } |
569 | ||
/*
 * Configure one Rx queue.
 *
 * Validates queue index and frame-size-vs-mempool constraints, binds
 * the interface to the mempool's hardware buffer pool on first use,
 * and — while push-mode slots remain — promotes the queue to push mode:
 * a dedicated pool channel, stashing, optional CGR tail drop, a private
 * QMan portal, and an entry in the device interrupt handle for Rx
 * interrupts.  Finally the per-queue CGR threshold is sized to @nb_desc.
 *
 * Returns 0 on success; negative errno / -1 / qman error codes on the
 * various failure paths (see the individual checks).
 */
static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf __rte_unused,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
	struct qm_mcc_initfq opts = {0};
	u32 flags = 0;
	int ret;
	u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, queue_idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}

	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
			queue_idx, rxq->fqid);

	/* Max packet can fit in single buffer */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
		;
	} else if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_SCATTER) {
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
			buffsz * DPAA_SGT_MAX_ENTRIES) {
			DPAA_PMD_ERR("max RxPkt size %d too big to fit "
				"MaxSGlist %d",
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				buffsz * DPAA_SGT_MAX_ENTRIES);
			rte_errno = EOVERFLOW;
			return -rte_errno;
		}
	} else {
		DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     buffsz - RTE_PKTMBUF_HEADROOM);
	}

	/* First mempool seen (or a different one): program the hardware
	 * buffer pool, internal-context params and frame descriptor offset.
	 */
	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
		struct fman_if_ic_params icp;
		uint32_t fd_offset;
		uint32_t bp_size;

		if (!mp->pool_data) {
			DPAA_PMD_ERR("Not an offloaded buffer pool!");
			return -1;
		}
		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

		memset(&icp, 0, sizeof(icp));
		/* set ICEOF for to the default value , which is 0*/
		icp.iciof = DEFAULT_ICIOF;
		icp.iceof = DEFAULT_RX_ICEOF;
		icp.icsz = DEFAULT_ICSZ;
		fman_if_set_ic_params(dpaa_intf->fif, &icp);

		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);

		/* Buffer pool size should be equal to Dataroom Size*/
		bp_size = rte_pktmbuf_data_room_size(mp);
		fman_if_set_bp(dpaa_intf->fif, mp->size,
			       dpaa_intf->bp_info->bpid, bp_size);
		dpaa_intf->valid = 1;
		DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
				dpaa_intf->name, fd_offset,
				fman_if_get_fdoff(dpaa_intf->fif));
	}
	DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
		fman_if_get_sg_enable(dpaa_intf->fif),
		dev->data->dev_conf.rxmode.max_rx_pkt_len);
	/* checking if push mode only, no error check for now */
	if (!rxq->is_static &&
	    dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
		struct qman_portal *qp;
		int q_fd;

		dpaa_push_queue_idx++;
		opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
		opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
				   QM_FQCTRL_CTXASTASHING |
				   QM_FQCTRL_PREFERINCACHE;
		opts.fqd.context_a.stashing.exclusive = 0;
		/* In muticore scenario stashing becomes a bottleneck on LS1046.
		 * So do not enable stashing in this case
		 */
		if (dpaa_svr_family != SVR_LS1046A_FAMILY)
			opts.fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
		opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
		opts.fqd.context_a.stashing.context_cl =
						DPAA_IF_RX_CONTEXT_STASH;

		/*Create a channel and associate given queue with the channel*/
		qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
		opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
		opts.fqd.dest.channel = rxq->ch_id;
		opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
		flags = QMAN_INITFQ_FLAG_SCHED;

		/* Configure tail drop */
		if (dpaa_intf->cgr_rx) {
			opts.we_mask |= QM_INITFQ_WE_CGID;
			opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
			opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
		}
		ret = qman_init_fq(rxq, flags, &opts);
		if (ret) {
			DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
			return ret;
		}
		/* LS1043A uses the no-prefetch pull callback variant. */
		if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
		} else {
			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
			rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
		}

		rxq->is_static = true;

		/* Allocate qman specific portals */
		qp = fsl_qman_fq_portal_create(&q_fd);
		if (!qp) {
			DPAA_PMD_ERR("Unable to alloc fq portal");
			return -1;
		}
		rxq->qp = qp;

		/* Set up the device interrupt handler */
		if (!dev->intr_handle) {
			struct rte_dpaa_device *dpaa_dev;
			struct rte_device *rdev = dev->device;

			dpaa_dev = container_of(rdev, struct rte_dpaa_device,
						device);
			dev->intr_handle = &dpaa_dev->intr_handle;
			dev->intr_handle->intr_vec = rte_zmalloc(NULL,
					dpaa_push_mode_max_queue, 0);
			if (!dev->intr_handle->intr_vec) {
				DPAA_PMD_ERR("intr_vec alloc failed");
				return -ENOMEM;
			}
			dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
			dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
		}

		dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
		dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
		dev->intr_handle->efds[queue_idx] = q_fd;
		rxq->q_fd = q_fd;
	}
	rxq->bp_array = rte_dpaa_bpid_info;
	dev->data->rx_queues[queue_idx] = rxq;

	/* configure the CGR size as per the desc size */
	if (dpaa_intf->cgr_rx) {
		struct qm_mcc_initcgr cgr_opts = {0};

		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
		ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"rx taildrop modify fail on fqid %d (ret=%d)",
				rxq->fqid, ret);
		}
	}

	return 0;
}
751 | ||
/*
 * Attach an Rx queue to an eventdev channel.
 *
 * Reprograms the frame queue with the default poll config, then adjusts
 * it for the requested schedule type (HOLDACTIVE for atomic, AVOIDBLOCK
 * otherwise; ordered is rejected), targets the given channel/priority,
 * and re-enables CGR tail drop if configured.  The event metadata is
 * cached on the queue for use at dequeue time.
 *
 * Returns 0 on success, -1 for the unsupported ordered type, or the
 * qman_init_fq() error code.
 */
int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
	struct qm_mcc_initfq opts = {0};

	if (dpaa_push_mode_max_queue)
		DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
			      "PUSH mode already enabled for first %d queues.\n"
			      "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
			      dpaa_push_mode_max_queue);

	dpaa_poll_queue_default_config(&opts);

	switch (queue_conf->ev.sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
		return -1;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
		break;
	}

	opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
	opts.fqd.dest.channel = ch_id;
	opts.fqd.dest.wq = queue_conf->ev.priority;

	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	flags = QMAN_INITFQ_FLAG_SCHED;

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
		return ret;
	}

	/* copy configuration which needs to be filled during dequeue */
	memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
	dev->data->rx_queues[eth_rx_queue_id] = rxq;

	return ret;
}
815 | ||
/*
 * Detach an Rx queue from its eventdev channel: reinitialize the frame
 * queue back to the default poll-mode configuration (keeping CGR tail
 * drop if configured), clear the event dequeue callback, and drop the
 * queue from the device's rx_queues array.  Init failures are only
 * logged; the function always returns 0.
 */
int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct qm_mcc_initfq opts;
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];

	dpaa_poll_queue_default_config(&opts);

	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
			     rxq->fqid, ret);
	}

	rxq->cb.dqrr_dpdk_cb = NULL;
	dev->data->rx_queues[eth_rx_queue_id] = NULL;

	return 0;
}
845 | ||
/* Rx queues are part of per-interface state allocated elsewhere, so
 * there is nothing to free per queue.
 */
static
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
851 | ||
/*
 * Configure one Tx queue.  The frame queues are pre-created per
 * interface, so this only validates the index and publishes the queue
 * pointer; nb_desc/socket/conf are ignored.
 */
static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		uint16_t nb_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx >= dev->data->nb_tx_queues) {
		rte_errno = EOVERFLOW;
		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, queue_idx, dev->data->nb_tx_queues);
		return -rte_errno;
	}

	DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
			queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
	return 0;
}
874 | ||
/* Tx queues live in per-interface state; nothing to free per queue. */
static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
879 | ||
880 | static uint32_t | |
881 | dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) | |
882 | { | |
883 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
884 | struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id]; | |
885 | u32 frm_cnt = 0; | |
886 | ||
887 | PMD_INIT_FUNC_TRACE(); | |
888 | ||
889 | if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) { | |
f67539c2 TL |
890 | DPAA_PMD_DEBUG("RX frame count for q(%d) is %u", |
891 | rx_queue_id, frm_cnt); | |
11fdf7f2 TL |
892 | } |
893 | return frm_cnt; | |
894 | } | |
895 | ||
/* .dev_set_link_down ethdev op: implemented by stopping the port. */
static int dpaa_link_down(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);

	return 0;
}
903 | ||
/* .dev_set_link_up ethdev op: implemented by (re)starting the port. */
static int dpaa_link_up(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_start(dev);

	return 0;
}
911 | ||
912 | static int | |
913 | dpaa_flow_ctrl_set(struct rte_eth_dev *dev, | |
914 | struct rte_eth_fc_conf *fc_conf) | |
915 | { | |
916 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
917 | struct rte_eth_fc_conf *net_fc; | |
918 | ||
919 | PMD_INIT_FUNC_TRACE(); | |
920 | ||
921 | if (!(dpaa_intf->fc_conf)) { | |
922 | dpaa_intf->fc_conf = rte_zmalloc(NULL, | |
923 | sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); | |
924 | if (!dpaa_intf->fc_conf) { | |
925 | DPAA_PMD_ERR("unable to save flow control info"); | |
926 | return -ENOMEM; | |
927 | } | |
928 | } | |
929 | net_fc = dpaa_intf->fc_conf; | |
930 | ||
931 | if (fc_conf->high_water < fc_conf->low_water) { | |
932 | DPAA_PMD_ERR("Incorrect Flow Control Configuration"); | |
933 | return -EINVAL; | |
934 | } | |
935 | ||
936 | if (fc_conf->mode == RTE_FC_NONE) { | |
937 | return 0; | |
938 | } else if (fc_conf->mode == RTE_FC_TX_PAUSE || | |
939 | fc_conf->mode == RTE_FC_FULL) { | |
940 | fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water, | |
941 | fc_conf->low_water, | |
942 | dpaa_intf->bp_info->bpid); | |
943 | if (fc_conf->pause_time) | |
944 | fman_if_set_fc_quanta(dpaa_intf->fif, | |
945 | fc_conf->pause_time); | |
946 | } | |
947 | ||
948 | /* Save the information in dpaa device */ | |
949 | net_fc->pause_time = fc_conf->pause_time; | |
950 | net_fc->high_water = fc_conf->high_water; | |
951 | net_fc->low_water = fc_conf->low_water; | |
952 | net_fc->send_xon = fc_conf->send_xon; | |
953 | net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; | |
954 | net_fc->mode = fc_conf->mode; | |
955 | net_fc->autoneg = fc_conf->autoneg; | |
956 | ||
957 | return 0; | |
958 | } | |
959 | ||
960 | static int | |
961 | dpaa_flow_ctrl_get(struct rte_eth_dev *dev, | |
962 | struct rte_eth_fc_conf *fc_conf) | |
963 | { | |
964 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
965 | struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf; | |
966 | int ret; | |
967 | ||
968 | PMD_INIT_FUNC_TRACE(); | |
969 | ||
970 | if (net_fc) { | |
971 | fc_conf->pause_time = net_fc->pause_time; | |
972 | fc_conf->high_water = net_fc->high_water; | |
973 | fc_conf->low_water = net_fc->low_water; | |
974 | fc_conf->send_xon = net_fc->send_xon; | |
975 | fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd; | |
976 | fc_conf->mode = net_fc->mode; | |
977 | fc_conf->autoneg = net_fc->autoneg; | |
978 | return 0; | |
979 | } | |
980 | ret = fman_if_get_fc_threshold(dpaa_intf->fif); | |
981 | if (ret) { | |
982 | fc_conf->mode = RTE_FC_TX_PAUSE; | |
983 | fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif); | |
984 | } else { | |
985 | fc_conf->mode = RTE_FC_NONE; | |
986 | } | |
987 | ||
988 | return 0; | |
989 | } | |
990 | ||
991 | static int | |
992 | dpaa_dev_add_mac_addr(struct rte_eth_dev *dev, | |
f67539c2 | 993 | struct rte_ether_addr *addr, |
11fdf7f2 TL |
994 | uint32_t index, |
995 | __rte_unused uint32_t pool) | |
996 | { | |
997 | int ret; | |
998 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
999 | ||
1000 | PMD_INIT_FUNC_TRACE(); | |
1001 | ||
1002 | ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index); | |
1003 | ||
1004 | if (ret) | |
f67539c2 | 1005 | DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret); |
11fdf7f2 TL |
1006 | return 0; |
1007 | } | |
1008 | ||
1009 | static void | |
1010 | dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev, | |
1011 | uint32_t index) | |
1012 | { | |
1013 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
1014 | ||
1015 | PMD_INIT_FUNC_TRACE(); | |
1016 | ||
1017 | fman_if_clear_mac_addr(dpaa_intf->fif, index); | |
1018 | } | |
1019 | ||
1020 | static int | |
1021 | dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, | |
f67539c2 | 1022 | struct rte_ether_addr *addr) |
11fdf7f2 TL |
1023 | { |
1024 | int ret; | |
1025 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
1026 | ||
1027 | PMD_INIT_FUNC_TRACE(); | |
1028 | ||
1029 | ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0); | |
1030 | if (ret) | |
f67539c2 | 1031 | DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret); |
11fdf7f2 TL |
1032 | |
1033 | return ret; | |
1034 | } | |
1035 | ||
f67539c2 TL |
1036 | static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev, |
1037 | uint16_t queue_id) | |
1038 | { | |
1039 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
1040 | struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; | |
1041 | ||
1042 | if (!rxq->is_static) | |
1043 | return -EINVAL; | |
1044 | ||
1045 | return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI); | |
1046 | } | |
1047 | ||
1048 | static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev, | |
1049 | uint16_t queue_id) | |
1050 | { | |
1051 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
1052 | struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; | |
1053 | uint32_t temp; | |
1054 | ssize_t temp1; | |
1055 | ||
1056 | if (!rxq->is_static) | |
1057 | return -EINVAL; | |
1058 | ||
1059 | qman_fq_portal_irqsource_remove(rxq->qp, ~0); | |
1060 | ||
1061 | temp1 = read(rxq->q_fd, &temp, sizeof(temp)); | |
1062 | if (temp1 != sizeof(temp)) | |
1063 | DPAA_PMD_ERR("irq read error"); | |
1064 | ||
1065 | qman_fq_portal_thread_irq(rxq->qp); | |
1066 | ||
1067 | return 0; | |
1068 | } | |
1069 | ||
11fdf7f2 TL |
/* Ethdev callback table for DPAA ports; installed on each port in
 * dpaa_dev_init() (and directly in rte_dpaa_probe() for secondary
 * processes).
 */
static struct eth_dev_ops dpaa_devops = {
	/* device lifecycle */
	.dev_configure		  = dpaa_eth_dev_configure,
	.dev_start		  = dpaa_eth_dev_start,
	.dev_stop		  = dpaa_eth_dev_stop,
	.dev_close		  = dpaa_eth_dev_close,
	.dev_infos_get		  = dpaa_eth_dev_info,
	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,

	/* queue management */
	.rx_queue_setup		  = dpaa_eth_rx_queue_setup,
	.tx_queue_setup		  = dpaa_eth_tx_queue_setup,
	.rx_queue_release	  = dpaa_eth_rx_queue_release,
	.tx_queue_release	  = dpaa_eth_tx_queue_release,
	.rx_queue_count		  = dpaa_dev_rx_queue_count,

	/* flow control */
	.flow_ctrl_get		  = dpaa_flow_ctrl_get,
	.flow_ctrl_set		  = dpaa_flow_ctrl_set,

	/* link, stats and filtering */
	.link_update		  = dpaa_eth_link_update,
	.stats_get		  = dpaa_eth_stats_get,
	.xstats_get		  = dpaa_dev_xstats_get,
	.xstats_get_by_id	  = dpaa_xstats_get_by_id,
	.xstats_get_names_by_id	  = dpaa_xstats_get_names_by_id,
	.xstats_get_names	  = dpaa_xstats_get_names,
	.xstats_reset		  = dpaa_eth_stats_reset,
	.stats_reset		  = dpaa_eth_stats_reset,
	.promiscuous_enable	  = dpaa_eth_promiscuous_enable,
	.promiscuous_disable	  = dpaa_eth_promiscuous_disable,
	.allmulticast_enable	  = dpaa_eth_multicast_enable,
	.allmulticast_disable	  = dpaa_eth_multicast_disable,
	.mtu_set		  = dpaa_mtu_set,
	.dev_set_link_down	  = dpaa_link_down,
	.dev_set_link_up	  = dpaa_link_up,
	.mac_addr_add		  = dpaa_dev_add_mac_addr,
	.mac_addr_remove	  = dpaa_dev_remove_mac_addr,
	.mac_addr_set		  = dpaa_dev_set_mac_addr,

	.fw_version_get		  = dpaa_fw_version_get,

	/* Rx interrupt (event) mode */
	.rx_queue_intr_enable	  = dpaa_dev_queue_intr_enable,
	.rx_queue_intr_disable	  = dpaa_dev_queue_intr_disable,
};
1111 | ||
1112 | static bool | |
1113 | is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv) | |
1114 | { | |
1115 | if (strcmp(dev->device->driver->name, | |
1116 | drv->driver.name)) | |
1117 | return false; | |
1118 | ||
1119 | return true; | |
1120 | } | |
1121 | ||
1122 | static bool | |
1123 | is_dpaa_supported(struct rte_eth_dev *dev) | |
1124 | { | |
1125 | return is_device_supported(dev, &rte_dpaa_pmd); | |
1126 | } | |
1127 | ||
1128 | int | |
1129 | rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on) | |
1130 | { | |
1131 | struct rte_eth_dev *dev; | |
1132 | struct dpaa_if *dpaa_intf; | |
1133 | ||
1134 | RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); | |
1135 | ||
1136 | dev = &rte_eth_devices[port]; | |
1137 | ||
1138 | if (!is_dpaa_supported(dev)) | |
1139 | return -ENOTSUP; | |
1140 | ||
1141 | dpaa_intf = dev->data->dev_private; | |
1142 | ||
1143 | if (on) | |
1144 | fman_if_loopback_enable(dpaa_intf->fif); | |
1145 | else | |
1146 | fman_if_loopback_disable(dpaa_intf->fif); | |
1147 | ||
1148 | return 0; | |
1149 | } | |
1150 | ||
1151 | static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf) | |
1152 | { | |
1153 | struct rte_eth_fc_conf *fc_conf; | |
1154 | int ret; | |
1155 | ||
1156 | PMD_INIT_FUNC_TRACE(); | |
1157 | ||
1158 | if (!(dpaa_intf->fc_conf)) { | |
1159 | dpaa_intf->fc_conf = rte_zmalloc(NULL, | |
1160 | sizeof(struct rte_eth_fc_conf), MAX_CACHELINE); | |
1161 | if (!dpaa_intf->fc_conf) { | |
1162 | DPAA_PMD_ERR("unable to save flow control info"); | |
1163 | return -ENOMEM; | |
1164 | } | |
1165 | } | |
1166 | fc_conf = dpaa_intf->fc_conf; | |
1167 | ret = fman_if_get_fc_threshold(dpaa_intf->fif); | |
1168 | if (ret) { | |
1169 | fc_conf->mode = RTE_FC_TX_PAUSE; | |
1170 | fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif); | |
1171 | } else { | |
1172 | fc_conf->mode = RTE_FC_NONE; | |
1173 | } | |
1174 | ||
1175 | return 0; | |
1176 | } | |
1177 | ||
/* Initialise an Rx FQ: reserve @fqid (or let QMan allocate one when it
 * is 0), create the frame queue, and initialise it with the PMD's
 * default poll-queue configuration. When @cgr_rx is non-NULL, a
 * congestion group is additionally created for tail drop at the global
 * td_threshold. Returns 0 on success or a negative error code.
 */
static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
			uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
	/* CGR template: frame-count mode, tail-drop enabled; the actual
	 * threshold is filled in below from td_threshold.
	 */
	struct qm_mcc_initcgr cgr_opts = {
		.we_mask = QM_CGR_WE_CS_THRES |
				QM_CGR_WE_CSTD_EN |
				QM_CGR_WE_MODE,
		.cgr = {
			.cstd_en = QM_CGR_EN,
			.mode = QMAN_CGR_MODE_FRAME
		}
	};

	/* fqid == 0 means "let QMan pick one" (dynamic FQID). */
	if (fqid) {
		ret = qman_reserve_fqid(fqid);
		if (ret) {
			DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
				     fqid, ret);
			return -EINVAL;
		}
	} else {
		flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
	}
	DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
	ret = qman_create_fq(fqid, flags, fq);
	if (ret) {
		DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
			fqid, ret);
		return ret;
	}
	fq->is_static = false;

	dpaa_poll_queue_default_config(&opts);

	if (cgr_rx) {
		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
		cgr_rx->cb = NULL;
		ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
				      &cgr_opts);
		if (ret) {
			/* Tail drop is best-effort: warn and fall through
			 * to init the FQ without a CGR.
			 */
			DPAA_PMD_WARN(
				"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
				fq->fqid, ret);
			goto without_cgr;
		}
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = cgr_rx->cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}
without_cgr:
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
	return ret;
}
1238 | ||
1239 | /* Initialise a Tx FQ */ | |
1240 | static int dpaa_tx_queue_init(struct qman_fq *fq, | |
1241 | struct fman_if *fman_intf) | |
1242 | { | |
1243 | struct qm_mcc_initfq opts = {0}; | |
1244 | int ret; | |
1245 | ||
11fdf7f2 TL |
1246 | ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID | |
1247 | QMAN_FQ_FLAG_TO_DCPORTAL, fq); | |
1248 | if (ret) { | |
1249 | DPAA_PMD_ERR("create tx fq failed with ret: %d", ret); | |
1250 | return ret; | |
1251 | } | |
1252 | opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL | | |
1253 | QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA; | |
1254 | opts.fqd.dest.channel = fman_intf->tx_channel_id; | |
1255 | opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY; | |
1256 | opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; | |
1257 | opts.fqd.context_b = 0; | |
1258 | /* no tx-confirmation */ | |
1259 | opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi; | |
1260 | opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo; | |
1261 | DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid); | |
1262 | ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); | |
1263 | if (ret) | |
1264 | DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret); | |
1265 | return ret; | |
1266 | } | |
1267 | ||
1268 | #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER | |
1269 | /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */ | |
1270 | static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid) | |
1271 | { | |
1272 | struct qm_mcc_initfq opts = {0}; | |
1273 | int ret; | |
1274 | ||
1275 | PMD_INIT_FUNC_TRACE(); | |
1276 | ||
1277 | ret = qman_reserve_fqid(fqid); | |
1278 | if (ret) { | |
1279 | DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d", | |
1280 | fqid, ret); | |
1281 | return -EINVAL; | |
1282 | } | |
1283 | /* "map" this Rx FQ to one of the interfaces Tx FQID */ | |
1284 | DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid); | |
1285 | ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq); | |
1286 | if (ret) { | |
1287 | DPAA_PMD_ERR("create debug fqid %d failed with ret: %d", | |
1288 | fqid, ret); | |
1289 | return ret; | |
1290 | } | |
1291 | opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL; | |
1292 | opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY; | |
1293 | ret = qman_init_fq(fq, 0, &opts); | |
1294 | if (ret) | |
1295 | DPAA_PMD_ERR("init debug fqid %d failed with ret: %d", | |
1296 | fqid, ret); | |
1297 | return ret; | |
1298 | } | |
1299 | #endif | |
1300 | ||
/* Initialise a network interface: bind the ethdev to its FMAN port
 * configuration, create the Rx FQs (optionally with congestion groups
 * for tail drop), the per-core Tx FQs and the debug FQs, then program
 * the FMAN interface into a quiesced default state (Rx disabled,
 * promiscuous/multicast off, stats reset, SG off).
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * allocated queue arrays are freed via the free_tx/free_rx labels.
 */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
	int num_rx_fqs, fqid;
	int loop, ret = 0;
	int dev_id;
	struct rte_dpaa_device *dpaa_device;
	struct dpaa_if *dpaa_intf;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;
	struct fman_if_bpool *bp, *tmp_bp;
	uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
	char eth_buf[RTE_ETHER_ADDR_FMT_SIZE];

	PMD_INIT_FUNC_TRACE();

	dpaa_intf = eth_dev->data->dev_private;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev->dev_ops = &dpaa_devops;
		/* Plugging of UCODE burst API not supported in Secondary */
		eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
		eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
		qman_set_fq_lookup_table(
				dpaa_intf->rx_queues->qman_fq_lookup_table);
#endif
		return 0;
	}

	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
	dev_id = dpaa_device->id.dev_id;
	dpaa_intf = eth_dev->data->dev_private;
	cfg = dpaa_get_eth_port_cfg(dev_id);
	fman_intf = cfg->fman_if;

	dpaa_intf->name = dpaa_device->name;

	/* save fman_if & cfg in the interface struture */
	dpaa_intf->fif = fman_intf;
	dpaa_intf->ifid = dev_id;
	dpaa_intf->cfg = cfg;

	/* Initialize Rx FQ's: queue count comes from the environment
	 * unless FMC is unconfigured (default_q), in which case a single
	 * default queue is used.
	 */
	if (default_q) {
		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
	} else {
		if (getenv("DPAA_NUM_RX_QUEUES"))
			num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
		else
			num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
	}


	/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
	 * queues.
	 */
	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
		DPAA_PMD_ERR("Invalid number of RX queues\n");
		return -EINVAL;
	}

	dpaa_intf->rx_queues = rte_zmalloc(NULL,
		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
	if (!dpaa_intf->rx_queues) {
		DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
		return -ENOMEM;
	}

	/* If congestion control is enabled globally*/
	if (td_threshold) {
		dpaa_intf->cgr_rx = rte_zmalloc(NULL,
			sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
		if (!dpaa_intf->cgr_rx) {
			DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
			ret = -ENOMEM;
			goto free_rx;
		}

		ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
		if (ret != num_rx_fqs) {
			/* NOTE(review): if the range was partially
			 * allocated, those CGRIDs are not released on this
			 * path — confirm qman_alloc_cgrid_range semantics.
			 */
			DPAA_PMD_WARN("insufficient CGRIDs available");
			ret = -EINVAL;
			goto free_rx;
		}
	} else {
		dpaa_intf->cgr_rx = NULL;
	}

	for (loop = 0; loop < num_rx_fqs; loop++) {
		/* Fixed FQID layout per MAC unless the single default
		 * queue from the port config is in use.
		 */
		if (default_q)
			fqid = cfg->rx_def;
		else
			fqid = DPAA_PCD_FQID_START + dpaa_intf->fif->mac_idx *
				DPAA_PCD_FQID_MULTIPLIER + loop;

		if (dpaa_intf->cgr_rx)
			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];

		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
			dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
			fqid);
		if (ret)
			goto free_rx;
		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_rx_queues = num_rx_fqs;

	/* Initialise Tx FQs.free_rx Have as many Tx FQ's as number of cores */
	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
		MAX_DPAA_CORES, MAX_CACHELINE);
	if (!dpaa_intf->tx_queues) {
		DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
		ret = -ENOMEM;
		goto free_rx;
	}

	for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
					 fman_intf);
		if (ret)
			goto free_tx;
		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	/* Error queues are best-effort: init failures are ignored here. */
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif

	DPAA_PMD_DEBUG("All frame queues created");

	/* Get the initial configuration for flow control */
	dpaa_fc_set_default(dpaa_intf);

	/* reset bpool list, initialize bpool dynamically */
	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
		list_del(&bp->node);
		rte_free(bp);
	}

	/* Populate ethdev structure */
	eth_dev->dev_ops = &dpaa_devops;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	/* Tx stays in drop-all mode until the port is started. */
	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
		RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
						"store MAC addresses",
				RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
		ret = -ENOMEM;
		goto free_tx;
	}

	/* copy the primary mac address */
	rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
	rte_ether_format_addr(eth_buf, sizeof(eth_buf), &fman_intf->mac_addr);

	DPAA_PMD_INFO("net: dpaa: %s: %s", dpaa_device->name, eth_buf);

	/* Disable RX mode */
	fman_if_discard_rx_errors(fman_intf);
	fman_if_disable_rx(fman_intf);
	/* Disable promiscuous mode */
	fman_if_promiscuous_disable(fman_intf);
	/* Disable multicast */
	fman_if_reset_mcast_filter_table(fman_intf);
	/* Reset interface statistics */
	fman_if_stats_reset(fman_intf);
	/* Disable SG by default */
	fman_if_set_sg(fman_intf, 0);
	fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);

	return 0;

free_tx:
	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;
	dpaa_intf->nb_tx_queues = 0;

free_rx:
	rte_free(dpaa_intf->cgr_rx);
	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;
	dpaa_intf->nb_rx_queues = 0;
	return ret;
}
1497 | ||
1498 | static int | |
1499 | dpaa_dev_uninit(struct rte_eth_dev *dev) | |
1500 | { | |
1501 | struct dpaa_if *dpaa_intf = dev->data->dev_private; | |
1502 | int loop; | |
1503 | ||
1504 | PMD_INIT_FUNC_TRACE(); | |
1505 | ||
1506 | if (rte_eal_process_type() != RTE_PROC_PRIMARY) | |
1507 | return -EPERM; | |
1508 | ||
1509 | if (!dpaa_intf) { | |
1510 | DPAA_PMD_WARN("Already closed or not started"); | |
1511 | return -1; | |
1512 | } | |
1513 | ||
1514 | dpaa_eth_dev_close(dev); | |
1515 | ||
1516 | /* release configuration memory */ | |
1517 | if (dpaa_intf->fc_conf) | |
1518 | rte_free(dpaa_intf->fc_conf); | |
1519 | ||
1520 | /* Release RX congestion Groups */ | |
1521 | if (dpaa_intf->cgr_rx) { | |
1522 | for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++) | |
1523 | qman_delete_cgr(&dpaa_intf->cgr_rx[loop]); | |
1524 | ||
1525 | qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid, | |
1526 | dpaa_intf->nb_rx_queues); | |
1527 | } | |
1528 | ||
1529 | rte_free(dpaa_intf->cgr_rx); | |
1530 | dpaa_intf->cgr_rx = NULL; | |
1531 | ||
1532 | rte_free(dpaa_intf->rx_queues); | |
1533 | dpaa_intf->rx_queues = NULL; | |
1534 | ||
1535 | rte_free(dpaa_intf->tx_queues); | |
1536 | dpaa_intf->tx_queues = NULL; | |
1537 | ||
11fdf7f2 TL |
1538 | dev->dev_ops = NULL; |
1539 | dev->rx_pkt_burst = NULL; | |
1540 | dev->tx_pkt_burst = NULL; | |
1541 | ||
1542 | return 0; | |
1543 | } | |
1544 | ||
/* Bus probe hook: validate mbuf headroom against the DPAA annotation
 * area, perform one-time global setup (FMC detection, push-mode policy),
 * initialise the calling thread's QMan/BMan portal, allocate the ethdev
 * and hand off to dpaa_dev_init(). Returns 0 on success or a negative
 * error code.
 */
static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	/* The DPAA hardware annotation + parse-results area must fit in
	 * the mbuf headroom; refuse to probe otherwise.
	 */
	if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
		RTE_PKTMBUF_HEADROOM) {
		DPAA_PMD_ERR(
		"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
		RTE_PKTMBUF_HEADROOM,
		DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);

		return -1;
	}

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		eth_dev->device = &dpaa_dev->device;
		eth_dev->dev_ops = &dpaa_devops;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	/* One-time (per process) global configuration. */
	if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
		/* No FMC configuration file => fall back to a single
		 * default Rx queue per port.
		 */
		if (access("/tmp/fmc.bin", F_OK) == -1) {
			DPAA_PMD_INFO("* FMC not configured.Enabling default mode");
			default_q = 1;
		}

		/* disabling the default push mode for LS1043 */
		if (dpaa_svr_family == SVR_LS1043A_FAMILY)
			dpaa_push_mode_max_queue = 0;

		/* if push mode queues to be enabled. Currenly we are allowing
		 * only one queue per thread.
		 */
		if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
			dpaa_push_mode_max_queue =
					atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
			if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
			    dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
		}

		is_global_init = 1;
	}

	/* Lazily initialise this lcore's hardware portal. */
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)1);
		if (ret) {
			DPAA_PMD_ERR("Unable to initialize portal");
			return ret;
		}
	}

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 *
	 * NOTE(review): this secondary branch is unreachable — the
	 * secondary-process case already returned above. Candidate for
	 * removal in a follow-up cleanup.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
	} else {
		eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
		if (eth_dev == NULL)
			return -ENOMEM;

		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa_if),
						RTE_CACHE_LINE_SIZE);
		if (!eth_dev->data->dev_private) {
			DPAA_PMD_ERR("Cannot allocate memzone for port data");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	}
	eth_dev->device = &dpaa_dev->device;
	dpaa_dev->eth_dev = eth_dev;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0) {
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	rte_eth_dev_release_port(eth_dev);
	return diag;
}
1646 | ||
1647 | static int | |
1648 | rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev) | |
1649 | { | |
1650 | struct rte_eth_dev *eth_dev; | |
1651 | ||
1652 | PMD_INIT_FUNC_TRACE(); | |
1653 | ||
1654 | eth_dev = dpaa_dev->eth_dev; | |
1655 | dpaa_dev_uninit(eth_dev); | |
1656 | ||
11fdf7f2 TL |
1657 | rte_eth_dev_release_port(eth_dev); |
1658 | ||
1659 | return 0; | |
1660 | } | |
1661 | ||
/* Bus driver descriptor: binds the probe/remove hooks above to
 * FSL_DPAA_ETH class devices on the DPAA bus.
 */
static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);

/* Constructor: register the PMD log type, defaulting to NOTICE level. */
RTE_INIT(dpaa_net_init_log)
{
	dpaa_logtype_pmd = rte_log_register("pmd.net.dpaa");
	if (dpaa_logtype_pmd >= 0)
		rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);
}