/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <rte_ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_hash.h>
#include <rte_jhash.h>

#include "hinic_compat.h"
#include "hinic_csr.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_hwif.h"
#include "hinic_pmd_wq.h"
#include "hinic_pmd_cmdq.h"
#include "hinic_pmd_mgmt.h"
#include "hinic_pmd_niccfg.h"
#include "hinic_pmd_mbox.h"

#define HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT 0
#define HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF
#define HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7

#define HINIC_FLR_TIMEOUT 1000

#define FFM_RECORD_NUM_MAX 32

#define HINIC_DMA_ATTR_ENTRY_ST_SHIFT 0
#define HINIC_DMA_ATTR_ENTRY_AT_SHIFT 8
#define HINIC_DMA_ATTR_ENTRY_PH_SHIFT 10
#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12
#define HINIC_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13

#define HINIC_DMA_ATTR_ENTRY_ST_MASK 0xFF
#define HINIC_DMA_ATTR_ENTRY_AT_MASK 0x3
#define HINIC_DMA_ATTR_ENTRY_PH_MASK 0x3
#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1
#define HINIC_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1

#define HINIC_DMA_ATTR_ENTRY_SET(val, member) \
        (((u32)(val) & HINIC_DMA_ATTR_ENTRY_##member##_MASK) << \
                HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)

#define HINIC_DMA_ATTR_ENTRY_CLEAR(val, member) \
        ((val) & (~(HINIC_DMA_ATTR_ENTRY_##member##_MASK \
                << HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)))

#define HINIC_PCIE_ST_DISABLE 0
#define HINIC_PCIE_AT_DISABLE 0
#define HINIC_PCIE_PH_DISABLE 0
#define PCIE_MSIX_ATTR_ENTRY 0

#define HINIC_HASH_FUNC rte_jhash
#define HINIC_HASH_KEY_LEN (sizeof(dma_addr_t))
#define HINIC_HASH_FUNC_INIT_VAL 0

static const char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {
        "RS-FEC", "BASE-FEC", "NO-FEC"};

static const char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = {
        "Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC",
        "Back plane", "BaseT"
};

static const char *hinic_module_link_err[LINK_ERR_NUM] = {
        "Unrecognized module",
};

struct hinic_vf_dma_attr_table {
        struct hinic_mgmt_msg_head mgmt_msg_head;

        u16 func_idx;
        u8 func_dma_entry_num;
        u8 entry_idx;
        u8 st;
        u8 at;
        u8 ph;
        u8 no_snooping;
        u8 tph_en;
        u8 resv1[3];
};

/**
 * hinic_cpu_to_be32 - convert data to big endian 32 bit format
 * @data: the data to convert
 * @len: length of data to convert, must be a multiple of 4 bytes
 */
void hinic_cpu_to_be32(void *data, u32 len)
{
        u32 i;
        u32 *mem = (u32 *)data;

        for (i = 0; i < (len >> 2); i++) {
                *mem = cpu_to_be32(*mem);
                mem++;
        }
}

/**
 * hinic_be32_to_cpu - convert data from big endian 32 bit format
 * @data: the data to convert
 * @len: length of data to convert, must be a multiple of 4 bytes
 */
void hinic_be32_to_cpu(void *data, u32 len)
{
        u32 i;
        u32 *mem = (u32 *)data;

        for (i = 0; i < (len >> 2); i++) {
                *mem = be32_to_cpu(*mem);
                mem++;
        }
}

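/**
 * hinic_dma_mem_zalloc - allocate a zeroed, IOVA-contiguous memzone and
 * record its IOVA in the per-device DMA address hash table
 * @hwdev: the hardware interface of a nic device
 * @size: size of the DMA memory to allocate
 * @dma_handle: returned IOVA of the allocated memory
 * @align: alignment of the memzone
 * @socket_id: socket to allocate from
 * Return: virtual address on success, NULL otherwise.
 */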
static void *hinic_dma_mem_zalloc(struct hinic_hwdev *hwdev, size_t size,
                                  dma_addr_t *dma_handle, unsigned int align,
                                  unsigned int socket_id)
{
        int rc, alloc_cnt;
        const struct rte_memzone *mz;
        char z_name[RTE_MEMZONE_NAMESIZE];
        hash_sig_t sig;
        rte_iova_t iova;

        if (dma_handle == NULL || size == 0)
                return NULL;

        alloc_cnt = rte_atomic32_add_return(&hwdev->os_dep.dma_alloc_cnt, 1);
        snprintf(z_name, sizeof(z_name), "%s_%d",
                 hwdev->pcidev_hdl->name, alloc_cnt);

        mz = rte_memzone_reserve_aligned(z_name, size, socket_id,
                                         RTE_MEMZONE_IOVA_CONTIG, align);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Alloc dma-able memory failed, errno: %d, mz_name: %s, size: 0x%zx",
                            rte_errno, z_name, size);
                return NULL;
        }

        iova = mz->iova;

        /* check if phys_addr already exists */
        sig = HINIC_HASH_FUNC(&iova, HINIC_HASH_KEY_LEN,
                              HINIC_HASH_FUNC_INIT_VAL);
        rc = rte_hash_lookup_with_hash(hwdev->os_dep.dma_addr_hash,
                                       &iova, sig);
        if (rc >= 0) {
                PMD_DRV_LOG(ERR, "Dma addr: %p already in hash table, error: %d, mz_name: %s",
                            (void *)iova, rc, z_name);
                goto phys_addr_hash_err;
        }

        /* record paddr in hash table */
        rte_spinlock_lock(&hwdev->os_dep.dma_hash_lock);
        rc = rte_hash_add_key_with_hash_data(hwdev->os_dep.dma_addr_hash,
                                             &iova, sig,
                                             (void *)(u64)mz);
        rte_spinlock_unlock(&hwdev->os_dep.dma_hash_lock);
        if (rc) {
                PMD_DRV_LOG(ERR, "Insert dma addr: %p hash failed, error: %d, mz_name: %s",
                            (void *)iova, rc, z_name);
                goto phys_addr_hash_err;
        }
        *dma_handle = iova;
        memset(mz->addr, 0, size);

        return mz->addr;

phys_addr_hash_err:
        (void)rte_memzone_free(mz);

        return NULL;
}

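/**
 * hinic_dma_mem_free - look up a DMA address in the hash table, remove the
 * entry and free the backing memzone
 * @hwdev: the hardware interface of a nic device
 * @size: size of the DMA memory, used for sanity checking only
 * @virt: virtual address returned by hinic_dma_mem_zalloc
 * @phys: IOVA returned by hinic_dma_mem_zalloc
 */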
static void
hinic_dma_mem_free(struct hinic_hwdev *hwdev, size_t size,
                   void *virt, dma_addr_t phys)
{
        int rc;
        struct rte_memzone *mz = NULL;
        struct rte_hash *hash;
        hash_sig_t sig;

        if (virt == NULL || phys == 0)
                return;

        hash = hwdev->os_dep.dma_addr_hash;
        sig = HINIC_HASH_FUNC(&phys, HINIC_HASH_KEY_LEN,
                              HINIC_HASH_FUNC_INIT_VAL);
        rc = rte_hash_lookup_with_hash_data(hash, &phys, sig, (void **)&mz);
        if (rc < 0) {
                PMD_DRV_LOG(ERR, "Can not find phys_addr: %p, error: %d",
                            (void *)phys, rc);
                return;
        }

        if (virt != mz->addr || size > mz->len) {
                PMD_DRV_LOG(ERR, "Match mz_info failed: "
                            "mz.name: %s, mz.phys: %p, mz.virt: %p, mz.len: %zu, "
                            "phys: %p, virt: %p, size: %zu",
                            mz->name, (void *)mz->iova, mz->addr, mz->len,
                            (void *)phys, virt, size);
        }

        rte_spinlock_lock(&hwdev->os_dep.dma_hash_lock);
        (void)rte_hash_del_key_with_hash(hash, &phys, sig);
        rte_spinlock_unlock(&hwdev->os_dep.dma_hash_lock);

        (void)rte_memzone_free(mz);
}

void *dma_zalloc_coherent(void *hwdev, size_t size, dma_addr_t *dma_handle,
                          unsigned int socket_id)
{
        return hinic_dma_mem_zalloc(hwdev, size, dma_handle,
                                    RTE_CACHE_LINE_SIZE, socket_id);
}

void *dma_zalloc_coherent_aligned(void *hwdev, size_t size,
                                  dma_addr_t *dma_handle, unsigned int socket_id)
{
        return hinic_dma_mem_zalloc(hwdev, size, dma_handle, HINIC_PAGE_SIZE,
                                    socket_id);
}

void *dma_zalloc_coherent_aligned256k(void *hwdev, size_t size,
                                      dma_addr_t *dma_handle,
                                      unsigned int socket_id)
{
        return hinic_dma_mem_zalloc(hwdev, size, dma_handle,
                                    HINIC_PAGE_SIZE * 64, socket_id);
}

void dma_free_coherent(void *hwdev, size_t size, void *virt, dma_addr_t phys)
{
        hinic_dma_mem_free(hwdev, size, virt, phys);
}

void dma_free_coherent_volatile(void *hwdev, size_t size,
                                volatile void *virt, dma_addr_t phys)
{
        int rc;
        struct rte_memzone *mz = NULL;
        struct hinic_hwdev *dev = hwdev;
        struct rte_hash *hash;
        hash_sig_t sig;

        if (virt == NULL || phys == 0)
                return;

        hash = dev->os_dep.dma_addr_hash;
        sig = HINIC_HASH_FUNC(&phys, HINIC_HASH_KEY_LEN,
                              HINIC_HASH_FUNC_INIT_VAL);
        rc = rte_hash_lookup_with_hash_data(hash, &phys, sig, (void **)&mz);
        if (rc < 0) {
                PMD_DRV_LOG(ERR, "Can not find phys_addr: %p, error: %d",
                            (void *)phys, rc);
                return;
        }

        if (virt != mz->addr || size > mz->len) {
                PMD_DRV_LOG(ERR, "Match mz_info failed: "
                            "mz.name:%s, mz.phys:%p, mz.virt:%p, mz.len:%zu, "
                            "phys:%p, virt:%p, size:%zu",
                            mz->name, (void *)mz->iova, mz->addr, mz->len,
                            (void *)phys, virt, size);
        }

        rte_spinlock_lock(&dev->os_dep.dma_hash_lock);
        (void)rte_hash_del_key_with_hash(hash, &phys, sig);
        rte_spinlock_unlock(&dev->os_dep.dma_hash_lock);

        (void)rte_memzone_free(mz);
}

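/**
 * dma_pool_create - create a DMA pool descriptor; element memory itself is
 * allocated on demand by dma_pool_alloc
 * @name: name of the pool
 * @dev: the hardware interface of a nic device
 * @size: element size
 * @align: element alignment
 * @boundary: allocation boundary recorded in the pool
 * Return: pool pointer on success, NULL otherwise.
 */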
struct dma_pool *dma_pool_create(const char *name, void *dev,
                                 size_t size, size_t align, size_t boundary)
{
        struct pci_pool *pool;

        pool = rte_zmalloc(NULL, sizeof(*pool), HINIC_MEM_ALLOC_ALIGN_MIN);
        if (!pool)
                return NULL;

        rte_atomic32_set(&pool->inuse, 0);
        pool->elem_size = size;
        pool->align = align;
        pool->boundary = boundary;
        pool->hwdev = dev;
        strncpy(pool->name, name, (sizeof(pool->name) - 1));

        return pool;
}

void dma_pool_destroy(struct dma_pool *pool)
{
        if (!pool)
                return;

        if (rte_atomic32_read(&pool->inuse) != 0) {
                PMD_DRV_LOG(ERR, "Leak memory, dma_pool: %s, inuse_count: %d",
                            pool->name, rte_atomic32_read(&pool->inuse));
        }

        rte_free(pool);
}

void *dma_pool_alloc(struct pci_pool *pool, dma_addr_t *dma_addr)
{
        void *buf;

        buf = hinic_dma_mem_zalloc(pool->hwdev, pool->elem_size, dma_addr,
                                   (u32)pool->align, SOCKET_ID_ANY);
        if (buf)
                rte_atomic32_inc(&pool->inuse);

        return buf;
}

void dma_pool_free(struct pci_pool *pool, void *vaddr, dma_addr_t dma)
{
        rte_atomic32_dec(&pool->inuse);
        hinic_dma_mem_free(pool->hwdev, pool->elem_size, vaddr, dma);
}

#define HINIC_MAX_DMA_ENTRIES 8192
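/**
 * hinic_osdep_init - initialize the OS dependent resources of the hardware
 * device: the DMA allocation counter, the hash table lock and the
 * IOVA-to-memzone hash table
 * @hwdev: the hardware interface of a nic device
 * Return: 0 on success, negative error value otherwise.
 */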
int hinic_osdep_init(struct hinic_hwdev *hwdev)
{
        struct rte_hash_parameters dh_params = { 0 };
        struct rte_hash *paddr_hash = NULL;

        rte_atomic32_set(&hwdev->os_dep.dma_alloc_cnt, 0);
        rte_spinlock_init(&hwdev->os_dep.dma_hash_lock);

        dh_params.name = hwdev->pcidev_hdl->name;
        dh_params.entries = HINIC_MAX_DMA_ENTRIES;
        dh_params.key_len = HINIC_HASH_KEY_LEN;
        dh_params.hash_func = HINIC_HASH_FUNC;
        dh_params.hash_func_init_val = HINIC_HASH_FUNC_INIT_VAL;
        dh_params.socket_id = SOCKET_ID_ANY;

        paddr_hash = rte_hash_find_existing(dh_params.name);
        if (paddr_hash == NULL) {
                paddr_hash = rte_hash_create(&dh_params);
                if (paddr_hash == NULL) {
                        PMD_DRV_LOG(ERR, "Create nic_dev phys_addr hash table failed");
                        return -ENOMEM;
                }
        } else {
                PMD_DRV_LOG(INFO, "Using existing dma hash table %s",
                            dh_params.name);
        }
        hwdev->os_dep.dma_addr_hash = paddr_hash;

        return 0;
}

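/**
 * hinic_osdep_deinit - free any DMA memzones still recorded in the hash
 * table (leaked allocations) and release the hash table itself
 * @hwdev: the hardware interface of a nic device
 */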
void hinic_osdep_deinit(struct hinic_hwdev *hwdev)
{
        uint32_t iter = 0;
        dma_addr_t key_pa;
        struct rte_memzone *data_mz = NULL;
        struct rte_hash *paddr_hash = hwdev->os_dep.dma_addr_hash;

        if (paddr_hash) {
                /* iterate through the hash table */
                while (rte_hash_iterate(paddr_hash, (const void **)&key_pa,
                                        (void **)&data_mz, &iter) >= 0) {
                        if (data_mz) {
                                PMD_DRV_LOG(WARNING, "Free leaked dma_addr: %p, mz: %s",
                                            (void *)key_pa, data_mz->name);
                                (void)rte_memzone_free(data_mz);
                        }
                }

                /* free phys_addr hash table */
                rte_hash_free(paddr_hash);
        }
}

/**
 * hinic_set_ci_table - set ci attribute table
 * @hwdev: the hardware interface of a nic device
 * @q_id: Queue id of SQ
 * @attr: Pointer to SQ CI attribute table
 * @return
 * 0 on success and ci attribute table is filled,
 * negative error value otherwise.
 */
int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr)
{
        struct hinic_cons_idx_attr cons_idx_attr;

        memset(&cons_idx_attr, 0, sizeof(cons_idx_attr));
        cons_idx_attr.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        cons_idx_attr.func_idx = hinic_global_func_id(hwdev);
        cons_idx_attr.dma_attr_off = attr->dma_attr_off;
        cons_idx_attr.pending_limit = attr->pending_limit;
        cons_idx_attr.coalescing_time = attr->coalescing_time;
        if (attr->intr_en) {
                cons_idx_attr.intr_en = attr->intr_en;
                cons_idx_attr.intr_idx = attr->intr_idx;
        }

        cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
        cons_idx_attr.sq_id = q_id;
        cons_idx_attr.ci_addr = attr->ci_dma_base;

        return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
                                      HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET,
                                      &cons_idx_attr, sizeof(cons_idx_attr),
                                      NULL, NULL, 0);
}

/**
 * hinic_set_pagesize - set page size to vat table
 * @hwdev: the hardware interface of a nic device
 * @page_size: vat page size
 * @return
 * 0 on success,
 * negative error value otherwise.
 */
int hinic_set_pagesize(void *hwdev, u8 page_size)
{
        struct hinic_page_size cmd;

        if (page_size > HINIC_PAGE_SIZE_MAX) {
                PMD_DRV_LOG(ERR, "Invalid page_size %u, bigger than %u",
                            page_size, HINIC_PAGE_SIZE_MAX);
                return -EINVAL;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        cmd.func_idx = hinic_global_func_id(hwdev);
        cmd.ppf_idx = hinic_ppf_idx(hwdev);
        cmd.page_size = page_size;

        return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
                                      HINIC_MGMT_CMD_PAGESIZE_SET,
                                      &cmd, sizeof(cmd),
                                      NULL, NULL, 0);
}

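/**
 * wait_for_flr_finish - poll the PF status until firmware reports that the
 * function level reset has finished or HINIC_FLR_TIMEOUT expires
 * @hwif: the hardware interface of a nic device
 * Return: 0 on success, -EFAULT on timeout.
 */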
static int wait_for_flr_finish(struct hinic_hwif *hwif)
{
        unsigned long end;
        enum hinic_pf_status status;

        end = jiffies + msecs_to_jiffies(HINIC_FLR_TIMEOUT);
        do {
                status = hinic_get_pf_status(hwif);
                if (status == HINIC_PF_STATUS_FLR_FINISH_FLAG)
                        return 0;

                rte_delay_ms(10);
        } while (time_before(jiffies, end));

        return -EFAULT;
}

#define HINIC_WAIT_CMDQ_IDLE_TIMEOUT 1000

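/**
 * wait_cmdq_stop - disable the command queues and wait until all of them are
 * idle; the queues are re-enabled if they do not drain within
 * HINIC_WAIT_CMDQ_IDLE_TIMEOUT
 * @hwdev: the hardware interface of a nic device
 * Return: 0 on success, -EBUSY if a cmdq is still busy.
 */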
static int wait_cmdq_stop(struct hinic_hwdev *hwdev)
{
        enum hinic_cmdq_type cmdq_type;
        struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
        unsigned long end;
        int err = 0;

        if (!(cmdqs->status & HINIC_CMDQ_ENABLE))
                return 0;

        cmdqs->status &= ~HINIC_CMDQ_ENABLE;

        end = jiffies + msecs_to_jiffies(HINIC_WAIT_CMDQ_IDLE_TIMEOUT);
        do {
                err = 0;
                cmdq_type = HINIC_CMDQ_SYNC;
                for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
                        if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type])) {
                                err = -EBUSY;
                                break;
                        }
                }

                if (!err)
                        return 0;

                rte_delay_ms(1);
        } while (time_before(jiffies, end));

        cmdqs->status |= HINIC_CMDQ_ENABLE;

        return err;
}

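/**
 * hinic_vf_rx_tx_flush - stop the command queues, ask the PF to flush the
 * VF rx/tx resources, wait for the doorbell flush to complete and
 * reinitialize the cmdq contexts
 * @hwdev: the hardware interface of a nic device
 * Return: 0 on success, negative error value otherwise.
 */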
static int hinic_vf_rx_tx_flush(struct hinic_hwdev *hwdev)
{
        struct hinic_clear_resource clr_res;
        int err;

        err = wait_cmdq_stop(hwdev);
        if (err) {
                PMD_DRV_LOG(WARNING, "Cmdq is still working");
                return err;
        }

        memset(&clr_res, 0, sizeof(clr_res));
        clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
        clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
        err = hinic_mbox_to_pf_no_ack(hwdev, HINIC_MOD_COMM,
                HINIC_MGMT_CMD_START_FLR, &clr_res, sizeof(clr_res));
        if (err)
                PMD_DRV_LOG(WARNING, "Notify flush message failed");

        /*
         * The PF first disables the VF doorbell flush csr. After the PF has
         * finished flushing the VF resources, it enables the doorbell flush
         * csr again.
         */
        err = wait_until_doorbell_flush_states(hwdev->hwif, DISABLE_DOORBELL);
        if (err)
                PMD_DRV_LOG(WARNING, "Wait doorbell flush disable timeout");

        err = wait_until_doorbell_flush_states(hwdev->hwif, ENABLE_DOORBELL);
        if (err)
                PMD_DRV_LOG(WARNING, "Wait doorbell flush enable timeout");

        err = hinic_reinit_cmdq_ctxts(hwdev);
        if (err)
                PMD_DRV_LOG(WARNING, "Reinit cmdq failed when vf flush");

        return err;
}

/**
 * hinic_pf_rx_tx_flush - clean up hardware resource
 * @hwdev: the hardware interface of a nic device
 * @return
 * 0 on success,
 * negative error value otherwise.
 */
static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev)
{
        struct hinic_hwif *hwif = hwdev->hwif;
        struct hinic_clear_doorbell clear_db;
        struct hinic_clear_resource clr_res;
        int err;

        rte_delay_ms(100);

        err = wait_cmdq_stop(hwdev);
        if (err) {
                PMD_DRV_LOG(ERR, "Cmdq is still working");
                return err;
        }

        hinic_disable_doorbell(hwif);
        memset(&clear_db, 0, sizeof(clear_db));
        clear_db.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        clear_db.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
        clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
        err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
                                     HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
                                     sizeof(clear_db), NULL, NULL, 0);
        if (err)
                PMD_DRV_LOG(WARNING, "Flush doorbell failed");

        hinic_set_pf_status(hwif, HINIC_PF_STATUS_FLR_START_FLAG);
        memset(&clr_res, 0, sizeof(clr_res));
        clr_res.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
        clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);

        err = hinic_msg_to_mgmt_no_ack(hwdev, HINIC_MOD_COMM,
                                       HINIC_MGMT_CMD_START_FLR, &clr_res,
                                       sizeof(clr_res), NULL, NULL);
        if (err)
                PMD_DRV_LOG(WARNING, "Notify flush message failed");

        err = wait_for_flr_finish(hwif);
        if (err)
                PMD_DRV_LOG(WARNING, "Wait firmware FLR timeout");

        hinic_enable_doorbell(hwif);

        err = hinic_reinit_cmdq_ctxts(hwdev);
        if (err)
                PMD_DRV_LOG(WARNING, "Reinit cmdq failed when pf flush");

        return 0;
}

int hinic_func_rx_tx_flush(struct hinic_hwdev *hwdev)
{
        if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF)
                return hinic_vf_rx_tx_flush(hwdev);
        else
                return hinic_pf_rx_tx_flush(hwdev);
}

/**
 * hinic_get_interrupt_cfg - get interrupt configuration from NIC
 * @hwdev: the hardware interface of a nic device
 * @interrupt_info: Information of Interrupt aggregation
 * Return: 0 on success, negative error value otherwise.
 */
static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
                                   struct nic_interrupt_info *interrupt_info)
{
        struct hinic_msix_config msix_cfg;
        u16 out_size = sizeof(msix_cfg);
        int err;

        memset(&msix_cfg, 0, sizeof(msix_cfg));
        msix_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        msix_cfg.func_id = hinic_global_func_id(hwdev);
        msix_cfg.msix_index = interrupt_info->msix_index;

        err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
                                     HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
                                     &msix_cfg, sizeof(msix_cfg),
                                     &msix_cfg, &out_size, 0);
        if (err || !out_size || msix_cfg.mgmt_msg_head.status) {
                PMD_DRV_LOG(ERR, "Get interrupt config failed, ret: %d",
                            msix_cfg.mgmt_msg_head.status);
                return -EINVAL;
        }

        interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt;
        interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt;
        interrupt_info->pending_limt = msix_cfg.pending_cnt;
        interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt;
        interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt;
        return 0;
}

/**
 * hinic_set_interrupt_cfg - set interrupt configuration to NIC
 * @hwdev: the hardware interface of a nic device
 * @interrupt_info: Information of Interrupt aggregation
 * Return: 0 on success, negative error value otherwise.
 */
int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
                            struct nic_interrupt_info interrupt_info)
{
        struct hinic_msix_config msix_cfg;
        struct nic_interrupt_info temp_info;
        u16 out_size = sizeof(msix_cfg);
        int err;

        memset(&msix_cfg, 0, sizeof(msix_cfg));
        msix_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        msix_cfg.func_id = hinic_global_func_id(hwdev);
        msix_cfg.msix_index = (u16)interrupt_info.msix_index;

        temp_info.msix_index = interrupt_info.msix_index;

        err = hinic_get_interrupt_cfg(hwdev, &temp_info);
        if (err)
                return -EINVAL;

        msix_cfg.lli_credit_cnt = temp_info.lli_credit_limit;
        msix_cfg.lli_tmier_cnt = temp_info.lli_timer_cfg;
        msix_cfg.pending_cnt = temp_info.pending_limt;
        msix_cfg.coalesct_timer_cnt = temp_info.coalesc_timer_cfg;
        msix_cfg.resend_timer_cnt = temp_info.resend_timer_cfg;

        if (interrupt_info.lli_set) {
                msix_cfg.lli_credit_cnt = interrupt_info.lli_credit_limit;
                msix_cfg.lli_tmier_cnt = interrupt_info.lli_timer_cfg;
        }

        if (interrupt_info.interrupt_coalesc_set) {
                msix_cfg.pending_cnt = interrupt_info.pending_limt;
                msix_cfg.coalesct_timer_cnt = interrupt_info.coalesc_timer_cfg;
                msix_cfg.resend_timer_cnt = interrupt_info.resend_timer_cfg;
        }

        err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
                                     HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
                                     &msix_cfg, sizeof(msix_cfg),
                                     &msix_cfg, &out_size, 0);
        if (err || !out_size || msix_cfg.mgmt_msg_head.status) {
                PMD_DRV_LOG(ERR, "Set interrupt config failed, ret: %d",
                            msix_cfg.mgmt_msg_head.status);
                return -EINVAL;
        }

        return 0;
}

/**
 * init_aeqs_msix_attr - Init interrupt attributes of aeq
 * @hwdev: the hardware interface of a nic device
 * @return
 * 0 on success,
 * negative error value otherwise.
 */
int init_aeqs_msix_attr(void *hwdev)
{
        struct hinic_hwdev *nic_hwdev = hwdev;
        struct hinic_aeqs *aeqs = nic_hwdev->aeqs;
        struct nic_interrupt_info info = {0};
        struct hinic_eq *eq;
        u16 q_id;
        int err;

        info.lli_set = 0;
        info.interrupt_coalesc_set = 1;
        info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT;
        info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG;
        info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG;

        for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
                eq = &aeqs->aeq[q_id];
                info.msix_index = eq->eq_irq.msix_entry_idx;
                err = hinic_set_interrupt_cfg(hwdev, info);
                if (err) {
                        PMD_DRV_LOG(ERR, "Set msix attr for aeq %d failed",
                                    q_id);
                        return -EFAULT;
                }
        }

        return 0;
}

/**
 * set_pf_dma_attr_entry - set the dma attributes for entry
 * @hwdev: the pointer to the private hardware device object
 * @entry_idx: the entry index in the dma table
 * @st: PCIE TLP steering tag
 * @at: PCIE TLP AT field
 * @ph: PCIE TLP Processing Hint field
 * @no_snooping: PCIE TLP No snooping
 * @tph_en: PCIE TLP Processing Hint Enable
 */
static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx,
                                  u8 st, u8 at, u8 ph,
                                  enum hinic_pcie_nosnoop no_snooping,
                                  enum hinic_pcie_tph tph_en)
{
        u32 addr, val, dma_attr_entry;

        /* Read Modify Write */
        addr = HINIC_CSR_DMA_ATTR_TBL_ADDR(entry_idx);

        val = hinic_hwif_read_reg(hwdev->hwif, addr);
        val = HINIC_DMA_ATTR_ENTRY_CLEAR(val, ST) &
              HINIC_DMA_ATTR_ENTRY_CLEAR(val, AT) &
              HINIC_DMA_ATTR_ENTRY_CLEAR(val, PH) &
              HINIC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) &
              HINIC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN);

        dma_attr_entry = HINIC_DMA_ATTR_ENTRY_SET(st, ST) |
                         HINIC_DMA_ATTR_ENTRY_SET(at, AT) |
                         HINIC_DMA_ATTR_ENTRY_SET(ph, PH) |
                         HINIC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) |
                         HINIC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN);

        val |= dma_attr_entry;
        hinic_hwif_write_reg(hwdev->hwif, addr, val);
}

static int set_vf_dma_attr_entry(struct hinic_hwdev *hwdev, u8 entry_idx,
                                 u8 st, u8 at, u8 ph,
                                 enum hinic_pcie_nosnoop no_snooping,
                                 enum hinic_pcie_tph tph_en)
{
        struct hinic_vf_dma_attr_table attr;

        memset(&attr, 0, sizeof(attr));
        attr.func_idx = hinic_global_func_id(hwdev);
        attr.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        attr.func_dma_entry_num = hinic_dma_attr_entry_num(hwdev);
        attr.entry_idx = entry_idx;
        attr.st = st;
        attr.at = at;
        attr.ph = ph;
        attr.no_snooping = no_snooping;
        attr.tph_en = tph_en;

        return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
                                      HINIC_MGMT_CMD_DMA_ATTR_SET,
                                      &attr, sizeof(attr), NULL, NULL, 0);
}

/**
 * dma_attr_table_init - initialize the default dma attributes
 * @hwdev: the pointer to the private hardware device object
 */
static int dma_attr_table_init(struct hinic_hwdev *hwdev)
{
        int err = 0;

        if (HINIC_IS_VF(hwdev))
                err = set_vf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
                        HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE,
                        HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP,
                        HINIC_PCIE_TPH_DISABLE);
        else
                set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
                        HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE,
                        HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP,
                        HINIC_PCIE_TPH_DISABLE);

        return err;
}

/**
 * hinic_init_attr_table - init dma and aeq msix attribute table
 * @hwdev: the pointer to the private hardware device object
 */
int hinic_init_attr_table(struct hinic_hwdev *hwdev)
{
        int err;

        err = dma_attr_table_init(hwdev);
        if (err) {
                PMD_DRV_LOG(ERR, "Initialize dma attribute table failed, err: %d",
                            err);
                return err;
        }

        err = init_aeqs_msix_attr(hwdev);
        if (err) {
                PMD_DRV_LOG(ERR, "Initialize aeqs msix attribute failed, err: %d",
                            err);
                return err;
        }

        return 0;
}

#define FAULT_SHOW_STR_LEN 16
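/**
 * fault_report_show - log the content of a fault event reported over the
 * management channel, decoding the fault type and error level
 * @hwdev: the hardware interface of a nic device
 * @event: fault event payload
 */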
static void fault_report_show(struct hinic_hwdev *hwdev,
                              struct hinic_fault_event *event)
{
        char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
                "chip", "ucode", "mem rd timeout", "mem wr timeout",
                "reg rd timeout", "reg wr timeout"};
        char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = {
                "fatal", "reset", "flr", "general", "suggestion"};
        char type_str[FAULT_SHOW_STR_LEN + 1] = { 0 };
        char level_str[FAULT_SHOW_STR_LEN + 1] = { 0 };
        u8 err_level;

        PMD_DRV_LOG(WARNING, "Fault event report received, func_id: %d",
                    hinic_global_func_id(hwdev));

        if (event->type < FAULT_TYPE_MAX)
                strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN);
        else
                strncpy(type_str, "unknown", FAULT_SHOW_STR_LEN);
        PMD_DRV_LOG(WARNING, "fault type: %d [%s]",
                    event->type, type_str);
        PMD_DRV_LOG(WARNING, "fault val[0]: 0x%08x",
                    event->event.val[0]);
        PMD_DRV_LOG(WARNING, "fault val[1]: 0x%08x",
                    event->event.val[1]);
        PMD_DRV_LOG(WARNING, "fault val[2]: 0x%08x",
                    event->event.val[2]);
        PMD_DRV_LOG(WARNING, "fault val[3]: 0x%08x",
                    event->event.val[3]);

        switch (event->type) {
        case FAULT_TYPE_CHIP:
                err_level = event->event.chip.err_level;
                if (err_level < FAULT_LEVEL_MAX)
                        strncpy(level_str, fault_level[err_level],
                                FAULT_SHOW_STR_LEN);
                else
                        strncpy(level_str, "unknown",
                                FAULT_SHOW_STR_LEN);

                PMD_DRV_LOG(WARNING, "err_level: %d [%s]",
                            err_level, level_str);

                if (err_level == FAULT_LEVEL_SERIOUS_FLR) {
                        PMD_DRV_LOG(WARNING, "flr func_id: %d",
                                    event->event.chip.func_id);
                } else {
                        PMD_DRV_LOG(WARNING, "node_id: %d",
                                    event->event.chip.node_id);
                        PMD_DRV_LOG(WARNING, "err_type: %d",
                                    event->event.chip.err_type);
                        PMD_DRV_LOG(WARNING, "err_csr_addr: %d",
                                    event->event.chip.err_csr_addr);
                        PMD_DRV_LOG(WARNING, "err_csr_value: %d",
                                    event->event.chip.err_csr_value);
                }
                break;
        case FAULT_TYPE_UCODE:
                PMD_DRV_LOG(WARNING, "cause_id: %d",
                            event->event.ucode.cause_id);
                PMD_DRV_LOG(WARNING, "core_id: %d",
                            event->event.ucode.core_id);
                PMD_DRV_LOG(WARNING, "c_id: %d",
                            event->event.ucode.c_id);
                PMD_DRV_LOG(WARNING, "epc: %d",
                            event->event.ucode.epc);
                break;
        case FAULT_TYPE_MEM_RD_TIMEOUT:
        case FAULT_TYPE_MEM_WR_TIMEOUT:
                PMD_DRV_LOG(WARNING, "err_csr_ctrl: %d",
                            event->event.mem_timeout.err_csr_ctrl);
                PMD_DRV_LOG(WARNING, "err_csr_data: %d",
                            event->event.mem_timeout.err_csr_data);
                PMD_DRV_LOG(WARNING, "ctrl_tab: %d",
                            event->event.mem_timeout.ctrl_tab);
                PMD_DRV_LOG(WARNING, "mem_index: %d",
                            event->event.mem_timeout.mem_index);
                break;
        case FAULT_TYPE_REG_RD_TIMEOUT:
        case FAULT_TYPE_REG_WR_TIMEOUT:
                PMD_DRV_LOG(WARNING, "err_csr: %d",
                            event->event.reg_timeout.err_csr);
                break;
        default:
                break;
        }
}

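/**
 * resources_state_set - notify the mgmt channel of the new resource state of
 * this function
 * @hwdev: the hardware interface of a nic device
 * @state: resource state to set
 * Return: 0 on success, negative error value otherwise.
 */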
static int resources_state_set(struct hinic_hwdev *hwdev,
                               enum hinic_res_state state)
{
        struct hinic_hwif *hwif = hwdev->hwif;
        struct hinic_cmd_set_res_state res_state;

        memset(&res_state, 0, sizeof(res_state));
        res_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        res_state.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
        res_state.state = state;

        return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
                                      HINIC_MGMT_CMD_RES_STATE_SET,
                                      &res_state, sizeof(res_state), NULL, NULL, 0);
}

/**
 * hinic_activate_hwdev_state - Activate host nic state and notify mgmt
 * channel that host nic is ready.
 * @hwdev: the hardware interface of a nic device
 * @return
 * 0 on success,
 * negative error value otherwise.
 */
int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev)
{
        int rc = HINIC_OK;

        if (!hwdev)
                return -EINVAL;

        hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);

        rc = resources_state_set(hwdev, HINIC_RES_ACTIVE);
        if (rc) {
                PMD_DRV_LOG(ERR, "Initialize resources state failed");
                return rc;
        }

        return 0;
}

/**
 * hinic_deactivate_hwdev_state - Deactivate host nic state and notify mgmt
 * channel that host nic is not ready.
 * @hwdev: the pointer to the private hardware device object
 */
void hinic_deactivate_hwdev_state(struct hinic_hwdev *hwdev)
{
        int rc = HINIC_OK;

        if (!hwdev)
                return;

        rc = resources_state_set(hwdev, HINIC_RES_CLEAN);
        if (rc)
                PMD_DRV_LOG(ERR, "Deinit resources state failed");

        hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
}

int hinic_get_board_info(void *hwdev, struct hinic_board_info *info)
{
        struct hinic_comm_board_info board_info;
        u16 out_size = sizeof(board_info);
        int err;

        if (!hwdev || !info)
                return -EINVAL;

        memset(&board_info, 0, sizeof(board_info));
        board_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
                                     HINIC_MGMT_CMD_GET_BOARD_INFO,
                                     &board_info, sizeof(board_info),
                                     &board_info, &out_size, 0);
        if (err || board_info.mgmt_msg_head.status || !out_size) {
                PMD_DRV_LOG(ERR, "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x",
                            err, board_info.mgmt_msg_head.status, out_size);
                return -EFAULT;
        }

        memcpy(info, &board_info.info, sizeof(*info));
        return 0;
}

/**
 * hinic_l2nic_reset - Restore the initial state of NIC
 * @hwdev: the hardware interface of a nic device
 * @return
 * 0 on success,
 * negative error value otherwise.
 */
int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
{
        struct hinic_hwif *hwif = hwdev->hwif;
        struct hinic_l2nic_reset l2nic_reset;
        int err = 0;

        err = hinic_set_vport_enable(hwdev, false);
        if (err) {
                PMD_DRV_LOG(ERR, "Set vport disable failed");
                return err;
        }

        rte_delay_ms(100);

        memset(&l2nic_reset, 0, sizeof(l2nic_reset));
        l2nic_reset.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        l2nic_reset.func_id = HINIC_HWIF_GLOBAL_IDX(hwif);
        err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
                                     HINIC_MGMT_CMD_L2NIC_RESET,
                                     &l2nic_reset, sizeof(l2nic_reset),
                                     NULL, NULL, 0);
        if (err || l2nic_reset.mgmt_msg_head.status) {
                PMD_DRV_LOG(ERR, "Reset L2NIC resources failed");
                return -EFAULT;
        }

        return 0;
}

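/**
 * hinic_show_sw_watchdog_timeout_info - dump a watchdog timeout report
 * received over the mgmt channel: registers, stack usage and a stack dump
 * @buf_in: watchdog report payload
 * @in_size: size of the input buffer
 * @buf_out: response buffer, the message header status is cleared on success
 * @out_size: size of the response written to buf_out
 */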
static void
hinic_show_sw_watchdog_timeout_info(void *buf_in, u16 in_size,
                                    void *buf_out, u16 *out_size)
{
        struct hinic_mgmt_watchdog_info *watchdog_info;
        u32 *dump_addr, *reg, stack_len, i, j;

        if (in_size != sizeof(*watchdog_info)) {
                PMD_DRV_LOG(ERR, "Invalid mgmt watchdog report, length: %d, should be %zu",
                            in_size, sizeof(*watchdog_info));
                return;
        }

        watchdog_info = (struct hinic_mgmt_watchdog_info *)buf_in;

        PMD_DRV_LOG(ERR, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x",
                    watchdog_info->curr_time_h, watchdog_info->curr_time_l,
                    watchdog_info->task_id, watchdog_info->sp);
        PMD_DRV_LOG(ERR, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x",
                    watchdog_info->curr_used, watchdog_info->peak_used,
                    watchdog_info->is_overflow, watchdog_info->stack_top,
                    watchdog_info->stack_bottom);

        PMD_DRV_LOG(ERR, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr: 0x%08x",
                    watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr);

        PMD_DRV_LOG(ERR, "Mgmt register info");

        for (i = 0; i < 3; i++) {
                reg = watchdog_info->reg + (u64)(u32)(4 * i);
                PMD_DRV_LOG(ERR, "0x%08x 0x%08x 0x%08x 0x%08x",
                            *(reg), *(reg + 1), *(reg + 2), *(reg + 3));
        }

        PMD_DRV_LOG(ERR, "0x%08x", watchdog_info->reg[12]);

        if (watchdog_info->stack_actlen <= 1024) {
                stack_len = watchdog_info->stack_actlen;
        } else {
                PMD_DRV_LOG(ERR, "Oops stack length: 0x%x is wrong",
                            watchdog_info->stack_actlen);
                stack_len = 1024;
        }

        PMD_DRV_LOG(ERR, "Mgmt dump stack, 16 bytes per line (start from sp)");
        for (i = 0; i < (stack_len / 16); i++) {
                dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16)));
                PMD_DRV_LOG(ERR, "0x%08x 0x%08x 0x%08x 0x%08x",
                            *dump_addr, *(dump_addr + 1), *(dump_addr + 2),
                            *(dump_addr + 3));
        }

        for (j = 0; j < ((stack_len % 16) / 4); j++) {
                dump_addr = (u32 *)(watchdog_info->data +
                            ((u64)(u32)(i * 16 + j * 4)));
                PMD_DRV_LOG(ERR, "0x%08x", *dump_addr);
        }

        *out_size = sizeof(*watchdog_info);
        watchdog_info = (struct hinic_mgmt_watchdog_info *)buf_out;
        watchdog_info->mgmt_msg_head.status = 0;
}

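/**
 * hinic_show_pcie_dfx_info - on a PCIe DFX notice, read the DFX information
 * back from the mgmt channel block by block and dump it to the log
 * @hwdev: the hardware interface of a nic device
 * @buf_in: notice message carrying the total DFX data length
 * @in_size: size of the input buffer
 * @buf_out: response buffer, the message header status is cleared on success
 * @out_size: size of the response written to buf_out
 */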
static void hinic_show_pcie_dfx_info(struct hinic_hwdev *hwdev,
                                     void *buf_in, u16 in_size,
                                     void *buf_out, u16 *out_size)
{
        struct hinic_pcie_dfx_ntc *notice_info =
                (struct hinic_pcie_dfx_ntc *)buf_in;
        struct hinic_pcie_dfx_info dfx_info;
        u16 size = 0;
        u16 cnt = 0;
        u32 num = 0;
        u32 i, j;
        int err;
        u32 *reg;

        if (in_size != sizeof(*notice_info)) {
                PMD_DRV_LOG(ERR, "Invalid pcie dfx notice info, length: %d, should be %zu.",
                            in_size, sizeof(*notice_info));
                return;
        }

        ((struct hinic_pcie_dfx_ntc *)buf_out)->mgmt_msg_head.status = 0;
        *out_size = sizeof(*notice_info);
        memset(&dfx_info, 0, sizeof(dfx_info));
        num = (u32)(notice_info->len / 1024);
        PMD_DRV_LOG(INFO, "INFO LEN: %d", notice_info->len);
        PMD_DRV_LOG(INFO, "PCIE DFX:");
        dfx_info.host_id = 0;
        dfx_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
        for (i = 0; i < num; i++) {
                dfx_info.offset = i * MAX_PCIE_DFX_BUF_SIZE;
                if (i == (num - 1))
                        dfx_info.last = 1;
                size = sizeof(dfx_info);
                err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
                                             HINIC_MGMT_CMD_PCIE_DFX_GET,
                                             &dfx_info, sizeof(dfx_info),
                                             &dfx_info, &size, 0);
                if (err || dfx_info.mgmt_msg_head.status || !size) {
                        PMD_DRV_LOG(ERR, "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x",
                                    err, dfx_info.mgmt_msg_head.status, size);
                        return;
                }

                reg = (u32 *)dfx_info.data;
                for (j = 0; j < 256; j = j + 8) {
                        PMD_DRV_LOG(ERR, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x",
                                    cnt, reg[j], reg[(u32)(j + 1)],
                                    reg[(u32)(j + 2)], reg[(u32)(j + 3)],
                                    reg[(u32)(j + 4)], reg[(u32)(j + 5)],
                                    reg[(u32)(j + 6)], reg[(u32)(j + 7)]);
                        cnt = cnt + 32;
                }
                memset(dfx_info.data, 0, MAX_PCIE_DFX_BUF_SIZE);
        }
}

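/**
 * hinic_show_ffm_info - log an FFM interrupt error record reported over the
 * mgmt channel; at most FFM_RECORD_NUM_MAX records are logged per device
 * @hwdev: the hardware interface of a nic device
 * @buf_in: FFM interrupt record payload
 * @in_size: size of the input buffer
 */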
static void
hinic_show_ffm_info(struct hinic_hwdev *hwdev, void *buf_in, u16 in_size)
{
        struct ffm_intr_info *intr;

        if (in_size != sizeof(struct ffm_intr_info)) {
                PMD_DRV_LOG(ERR, "Invalid input buffer len, length: %d, should be %zu.",
                            in_size, sizeof(struct ffm_intr_info));
                return;
        }

        if (hwdev->ffm_num < FFM_RECORD_NUM_MAX) {
                hwdev->ffm_num++;
                intr = (struct ffm_intr_info *)buf_in;
                PMD_DRV_LOG(WARNING, "node_id(%d),err_csr_addr(0x%x),err_csr_val(0x%x),err_level(0x%x),err_type(0x%x)",
                            intr->node_id,
                            intr->err_csr_addr,
                            intr->err_csr_value,
                            intr->err_level,
                            intr->err_type);
        }
}

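/**
 * hinic_comm_async_event_handle - dispatch asynchronous events from the mgmt
 * channel: fault reports, watchdog timeouts, PCIe DFX notices and FFM records
 * @hwdev: the hardware interface of a nic device
 * @cmd: event command id
 * @buf_in: event payload
 * @in_size: size of the event payload
 * @buf_out: response buffer
 * @out_size: size of the response written to buf_out
 */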
void hinic_comm_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
                                   void *buf_in, u16 in_size,
                                   void *buf_out, u16 *out_size)
{
        struct hinic_cmd_fault_event *fault_event, *ret_fault_event;

        if (!hwdev)
                return;

        *out_size = 0;

        switch (cmd) {
        case HINIC_MGMT_CMD_FAULT_REPORT:
                if (in_size != sizeof(*fault_event)) {
                        PMD_DRV_LOG(ERR, "Invalid fault event report, length: %d, should be %zu",
                                    in_size, sizeof(*fault_event));
                        return;
                }

                fault_event = (struct hinic_cmd_fault_event *)buf_in;
                fault_report_show(hwdev, &fault_event->event);

                if (hinic_func_type(hwdev) != TYPE_VF) {
                        ret_fault_event =
                                (struct hinic_cmd_fault_event *)buf_out;
                        ret_fault_event->mgmt_msg_head.status = 0;
                        *out_size = sizeof(*ret_fault_event);
                }
                break;

        case HINIC_MGMT_CMD_WATCHDOG_INFO:
                hinic_show_sw_watchdog_timeout_info(buf_in, in_size,
                                                    buf_out, out_size);
                break;

        case HINIC_MGMT_CMD_PCIE_DFX_NTC:
                hinic_show_pcie_dfx_info(hwdev, buf_in, in_size,
                                         buf_out, out_size);
                break;

        case HINIC_MGMT_CMD_FFM_SET:
                hinic_show_ffm_info(hwdev, buf_in, in_size);
                break;

        default:
                break;
        }
}

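/**
 * hinic_cable_status_event - log cable plug/unplug and link error events and
 * acknowledge them in the response buffer
 * @cmd: port event command id
 * @buf_in: event payload
 * @buf_out: response buffer, the message header status is cleared
 * @out_size: size of the response written to buf_out
 */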
static void
hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
                         void *buf_out, u16 *out_size)
{
        struct hinic_cable_plug_event *plug_event;
        struct hinic_link_err_event *link_err;

        if (cmd == HINIC_PORT_CMD_CABLE_PLUG_EVENT) {
                plug_event = (struct hinic_cable_plug_event *)buf_in;
                PMD_DRV_LOG(INFO, "Port module event: Cable %s",
                            plug_event->plugged ? "plugged" : "unplugged");

                *out_size = sizeof(*plug_event);
                plug_event = (struct hinic_cable_plug_event *)buf_out;
                plug_event->mgmt_msg_head.status = 0;
        } else if (cmd == HINIC_PORT_CMD_LINK_ERR_EVENT) {
                link_err = (struct hinic_link_err_event *)buf_in;
                if (link_err->err_type >= LINK_ERR_NUM) {
                        PMD_DRV_LOG(ERR, "Link failed, Unknown type: 0x%x",
                                    link_err->err_type);
                } else {
                        PMD_DRV_LOG(INFO, "Link failed, type: 0x%x: %s",
                                    link_err->err_type,
                                    hinic_module_link_err[link_err->err_type]);
                }

                *out_size = sizeof(*link_err);
                link_err = (struct hinic_link_err_event *)buf_out;
                link_err->mgmt_msg_head.status = 0;
        }
}

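/**
 * hinic_link_event_process - translate a hardware link status event into an
 * rte_eth_link structure and store it in the ethdev
 * @hwdev: the hardware interface of a nic device
 * @eth_dev: the ethdev the event belongs to
 * @status: hardware link status, zero means link down
 * Return: 0 on success, negative error value otherwise.
 */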
static int hinic_link_event_process(struct hinic_hwdev *hwdev,
                                    struct rte_eth_dev *eth_dev, u8 status)
{
        uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
                                        ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
                                        ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
                                        ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
        struct nic_port_info port_info;
        struct rte_eth_link link;
        int rc = HINIC_OK;

        if (!status) {
                link.link_status = ETH_LINK_DOWN;
                link.link_speed = 0;
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                link.link_autoneg = ETH_LINK_FIXED;
        } else {
                link.link_status = ETH_LINK_UP;

                memset(&port_info, 0, sizeof(port_info));
                rc = hinic_get_port_info(hwdev, &port_info);
                if (rc) {
                        link.link_speed = ETH_SPEED_NUM_NONE;
                        link.link_duplex = ETH_LINK_FULL_DUPLEX;
                        link.link_autoneg = ETH_LINK_FIXED;
                } else {
                        link.link_speed = port_speed[port_info.speed %
                                                     LINK_SPEED_MAX];
                        link.link_duplex = port_info.duplex;
                        link.link_autoneg = port_info.autoneg_state;
                }
        }
        (void)rte_eth_linkstatus_set(eth_dev, &link);

        return rc;
}

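/**
 * hinic_lsc_process - update the link status and, if the update succeeded,
 * fire the RTE_ETH_EVENT_INTR_LSC callback
 * @hwdev: the hardware interface of a nic device
 * @rte_dev: the ethdev the event belongs to
 * @status: hardware link status, zero means link down
 */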
static void hinic_lsc_process(struct hinic_hwdev *hwdev,
                              struct rte_eth_dev *rte_dev, u8 status)
{
        int ret;

        ret = hinic_link_event_process(hwdev, rte_dev, status);
        /* check if link has changed, notify callback */
        if (ret == 0)
                _rte_eth_dev_callback_process(rte_dev,
                                              RTE_ETH_EVENT_INTR_LSC,
                                              NULL);
}

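/**
 * hinic_l2nic_async_event_handle - dispatch asynchronous L2 NIC port events:
 * link status reports, cable plug/link error events and mgmt reset notices
 * @hwdev: the hardware interface of a nic device
 * @param: the rte_eth_dev registered for the event
 * @cmd: port event command id
 * @buf_in: event payload
 * @in_size: size of the event payload
 * @buf_out: response buffer
 * @out_size: size of the response written to buf_out
 */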
void hinic_l2nic_async_event_handle(struct hinic_hwdev *hwdev,
                                    void *param, u8 cmd,
                                    void *buf_in, u16 in_size,
                                    void *buf_out, u16 *out_size)
{
        struct hinic_port_link_status *in_link;
        struct rte_eth_dev *eth_dev;

        if (!hwdev)
                return;

        *out_size = 0;

        switch (cmd) {
        case HINIC_PORT_CMD_LINK_STATUS_REPORT:
                eth_dev = param;
                in_link = (struct hinic_port_link_status *)buf_in;
                PMD_DRV_LOG(INFO, "Link status event report, dev_name: %s, port_id: %d, link_status: %s",
                            eth_dev->data->name, eth_dev->data->port_id,
                            in_link->link ? "UP" : "DOWN");

                hinic_lsc_process(hwdev, eth_dev, in_link->link);
                break;

        case HINIC_PORT_CMD_CABLE_PLUG_EVENT:
        case HINIC_PORT_CMD_LINK_ERR_EVENT:
                hinic_cable_status_event(cmd, buf_in, in_size,
                                         buf_out, out_size);
                break;

        case HINIC_PORT_CMD_MGMT_RESET:
                PMD_DRV_LOG(WARNING, "Mgmt is reset");
                break;

        default:
                PMD_DRV_LOG(ERR, "Unsupported event %d to process",
                            cmd);
                break;
        }
}

static void print_cable_info(struct hinic_link_info *info)
{
        char tmp_str[512] = {0};
        char tmp_vendor[17] = {0};
        const char *port_type = "Unknown port type";
        int i;

        if (info->cable_absent) {
                PMD_DRV_LOG(INFO, "Cable not present");
                return;
        }

        if (info->port_type < LINK_PORT_MAX_TYPE)
                port_type = __hw_to_char_port_type[info->port_type];
        else
                PMD_DRV_LOG(INFO, "Unknown port type: %u",
                            info->port_type);
        if (info->port_type == LINK_PORT_FIBRE) {
                if (info->port_sub_type == FIBRE_SUBTYPE_SR)
                        port_type = "Fibre-SR";
                else if (info->port_sub_type == FIBRE_SUBTYPE_LR)
                        port_type = "Fibre-LR";
        }

        for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) {
                if (info->vendor_name[i] == ' ')
                        info->vendor_name[i] = '\0';
                else
                        break;
        }

        memcpy(tmp_vendor, info->vendor_name, sizeof(info->vendor_name));
        snprintf(tmp_str, sizeof(tmp_str),
                 "Vendor: %s, %s, %s, length: %um, max_speed: %uGbps",
                 tmp_vendor, info->sfp_type ? "SFP" : "QSFP", port_type,
                 info->cable_length, info->cable_max_speed);
        if (info->port_type != LINK_PORT_COPPER)
                snprintf(tmp_str + strlen(tmp_str),
                         sizeof(tmp_str) - strlen(tmp_str),
                         ", Temperature: %u", info->cable_temp);

        PMD_DRV_LOG(INFO, "Cable information: %s", tmp_str);
}

static void print_hi30_status(struct hinic_link_info *info)
{
        struct hi30_ffe_data *ffe_data;
        struct hi30_ctle_data *ctle_data;

        ffe_data = (struct hi30_ffe_data *)info->hi30_ffe;
        ctle_data = (struct hi30_ctle_data *)info->hi30_ctle;

        PMD_DRV_LOG(INFO, "TX_FFE: PRE2=%s%d; PRE1=%s%d; MAIN=%d; POST1=%s%d; POST1X=%s%d",
                    (ffe_data->PRE1 & 0x10) ? "-" : "",
                    (int)(ffe_data->PRE1 & 0xf),
                    (ffe_data->PRE2 & 0x10) ? "-" : "",
                    (int)(ffe_data->PRE2 & 0xf),
                    (int)ffe_data->MAIN,
                    (ffe_data->POST1 & 0x10) ? "-" : "",
                    (int)(ffe_data->POST1 & 0xf),
                    (ffe_data->POST2 & 0x10) ? "-" : "",
                    (int)(ffe_data->POST2 & 0xf));
        PMD_DRV_LOG(INFO, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u",
                    ctle_data->ctlebst[0], ctle_data->ctlebst[1],
                    ctle_data->ctlebst[2], ctle_data->ctlecmband[0],
                    ctle_data->ctlecmband[1], ctle_data->ctlecmband[2],
                    ctle_data->ctlermband[0], ctle_data->ctlermband[1],
                    ctle_data->ctlermband[2], ctle_data->ctleza[0],
                    ctle_data->ctleza[1], ctle_data->ctleza[2]);
}

static void print_link_info(struct hinic_link_info *info,
                            enum hilink_info_print_event type)
{
        const char *fec = "None";

        if (info->fec < HILINK_FEC_MAX_TYPE)
                fec = __hw_to_char_fec[info->fec];
        else
                PMD_DRV_LOG(INFO, "Unknown fec type: %u",
                            info->fec);

        if (type == HILINK_EVENT_LINK_UP || !info->an_state) {
                PMD_DRV_LOG(INFO, "Link information: speed %dGbps, %s, autoneg %s",
                            info->speed, fec, info->an_state ? "on" : "off");
        } else {
                PMD_DRV_LOG(INFO, "Link information: autoneg: %s",
                            info->an_state ? "on" : "off");
        }
}

static const char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = {
        "", "link up", "link down", "cable plugged"
};

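/**
 * hinic_print_hilink_info - log the hilink information attached to a link
 * up/down or cable plugged report: cable data, link parameters, hi30 FFE/CTLE
 * status and, for link down, extra PMA/PCS/MAC debug registers
 * @buf_in: hilink report payload
 * @in_size: size of the input buffer
 * @buf_out: response buffer, the message header status is cleared on success
 * @out_size: size of the response written to buf_out
 */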
static void hinic_print_hilink_info(void *buf_in, u16 in_size,
                                    void *buf_out, u16 *out_size)
{
        struct hinic_hilink_link_info *hilink_info =
                (struct hinic_hilink_link_info *)buf_in;
        struct hinic_link_info *info;
        enum hilink_info_print_event type;

        if (in_size != sizeof(*hilink_info)) {
                PMD_DRV_LOG(ERR, "Invalid hilink info message size %d, should be %zu",
                            in_size, sizeof(*hilink_info));
                return;
        }

        ((struct hinic_hilink_link_info *)buf_out)->mgmt_msg_head.status = 0;
        *out_size = sizeof(*hilink_info);

        info = &hilink_info->info;
        type = hilink_info->info_type;

        if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) {
                PMD_DRV_LOG(INFO, "Invalid hilink info report, type: %d",
                            type);
                return;
        }

        PMD_DRV_LOG(INFO, "Hilink info report after %s",
                    hilink_info_report_type[type]);

        print_cable_info(info);

        print_link_info(info, type);

        print_hi30_status(info);

        if (type == HILINK_EVENT_LINK_UP)
                return;

        if (type == HILINK_EVENT_CABLE_PLUGGED) {
                PMD_DRV_LOG(INFO, "alos: %u, rx_los: %u",
                            info->alos, info->rx_los);
                return;
        }

        PMD_DRV_LOG(INFO, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug info reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x",
                    info->pma_status ? "on" : "off",
                    info->mac_tx_en ? "enable" : "disable",
                    info->mac_rx_en ? "enable" : "disable", info->pma_dbg_info_reg,
                    info->pma_signal_ok_reg, info->rf_lf_status_reg);
        PMD_DRV_LOG(INFO, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x, PCS link: 0x%x, MAC link: 0x%x, PCS_err_cnt: 0x%x",
                    info->alos, info->rx_los, info->pcs_err_blk_cnt_reg,
                    info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt);
}

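/**
 * hinic_hilink_async_event_handle - dispatch asynchronous hilink events;
 * currently only the link info report is handled
 * @hwdev: the hardware interface of a nic device
 * @cmd: hilink event command id
 * @buf_in: event payload
 * @in_size: size of the event payload
 * @buf_out: response buffer
 * @out_size: size of the response written to buf_out
 */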
void hinic_hilink_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
                                     void *buf_in, u16 in_size,
                                     void *buf_out, u16 *out_size)
{
        if (!hwdev)
                return;

        *out_size = 0;

        switch (cmd) {
        case HINIC_HILINK_CMD_GET_LINK_INFO:
                hinic_print_hilink_info(buf_in, in_size, buf_out,
                                        out_size);
                break;

        default:
                PMD_DRV_LOG(ERR, "Unsupported event %d to process",
                            cmd);
                break;
        }
}