// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}
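
/* hclgevf_tqps_update_stats() walks every task queue pair (TQP) owned by
 * this VF and refreshes its RX/TX packet counters. Each counter is read
 * with a firmware command: one descriptor is built per query, the queue
 * index goes into data[0], and the accumulated packet count comes back in
 * data[4] of the completed descriptor.
 */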
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[4]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[4]);
	}

	return 0;
}
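
/* Copy the per-queue counters into the flat u64 array that ethtool
 * expects: all TX ring counters first, then all RX ring counters. The
 * ordering here must match the string table built by
 * hclgevf_tqps_get_strings() below.
 */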
static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *buff = data;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}
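
/* The PF's HCLGE_MBX_GET_QINFO response is parsed below as four u16
 * values packed back to back: number of TQPs, maximum RSS size,
 * descriptor count per ring, and RX buffer length; hence the 8-byte
 * response buffer and the fixed 2-byte offsets.
 */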
static int hclge_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_enable_tso(struct hclgevf_dev *hdev, int enable)
{
	struct hclgevf_cfg_tso_status_cmd *req;
	struct hclgevf_desc desc;

	req = (struct hclgevf_cfg_tso_status_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_TSO_GENERIC_CONFIG,
				     false);
	hnae_set_bit(req->tso_enable, HCLGEVF_TSO_ENABLE_B, enable);

	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);

	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}
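
/* Write the shadow RSS indirection table to hardware. The table does not
 * fit in a single descriptor, so it is pushed in chunks of
 * HCLGEVF_RSS_CFG_TBL_SIZE entries, each command carrying the starting
 * table index and a bitmap of the entries being set.
 */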
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			     (tc_valid[i] & 0x1));
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}
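
/* Read the RSS hash algorithm and (optionally) the hash key back from
 * hardware. The key spans several command descriptors, so up to three
 * lookups are issued; each returns HCLGEVF_RSS_HASH_KEY_NUM key bytes
 * except the last, which carries the remainder of HCLGEVF_RSS_KEY_SIZE.
 */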
static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	int lkup_times = key ? 3 : 1;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;
	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}
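
/* (Un)map a chain of rings to/from an interrupt vector via the VF-to-PF
 * mailbox. The message layout is: msg[0] = map/unmap opcode, msg[1] =
 * vector id (local to the VF), msg[2] = number of rings in this message,
 * then HCLGEVF_RING_NODE_VARIABLE_NUM msg slots per ring holding the ring
 * type and tqp index. Chains longer than one mailbox message can carry
 * are split across multiple commands.
 */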
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
#define HCLGEVF_RING_NODE_VARIABLE_NUM		3
#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM	3
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i, vector_id;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	type = en ?
		HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	req->msg[0] = type;
	req->msg[1] = vector_id; /* vector_id should be id in VF */

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		i++;
		/* msg[2] is cause num */
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
			hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
			node->tqp_index;
		if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
			  HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
			 HCLGEVF_RING_NODE_VARIABLE_NUM) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	if (i > 0) {
		req->msg[2] = i;

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
	if (ret) {
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id, ret);
		return ret;
	}

	hclgevf_free_vector(hdev, vector);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg[2] = {0};

	msg[0] = en;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
				    msg, 1, false, NULL, 0);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      HCLGE_MBX_MAC_VLAN_UC_MODIFY,
				      msg_data, ETH_ALEN * 2,
				      false, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}
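
/* Ask the PF to add or remove a VLAN filter entry. The 5-byte mailbox
 * payload is: byte 0 the is_kill flag, bytes 1-2 the VLAN id, bytes 3-4
 * the VLAN protocol; only 802.1Q is supported here.
 */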
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
			     NULL, 0);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}
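
/* Decode the vector 0 interrupt source. Only the CMDQ RX (mailbox) event
 * is handled; its bit is masked out of the source register value, which
 * the caller later writes back to acknowledge the event.
 */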
static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	/* schedule the VF mailbox service task, if not already scheduled */
	hclgevf_mbx_task_schedule(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclge_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector =
		hdev->vector_status[hdev->num_msi_used];

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
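
/* Initial RSS setup: seed the shadow indirection table with a round-robin
 * spread of the queues (entry i maps to queue i % rss_size_max), then push
 * the table and the TC mode configuration to hardware.
 */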
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < hdev->num_tqps; i++) {
		/* ring disable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}
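
/* Allocate MSI/MSI-X vectors for the VF and set up the bookkeeping arrays:
 * vector_status[] tracks ownership of each vector (HCLGEVF_INVALID_VPORT
 * means free) and vector_irq[] caches the Linux IRQ number for each one.
 */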
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_instance(struct hclgevf_dev *hdev,
				 struct hnae3_client *client)
{
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;
		break;
	case HNAE3_CLIENT_ROCE:
		hdev->roce_client = client;
		hdev->roce.client = client;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
				    struct hnae3_client *client)
{
	/* un-init roce, if it exists */
	if (hdev->roce_client)
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);

	/* un-init nic/unic, if this was not called by roce client */
	if ((client->ops->uninit_instance) &&
	    (client->type != HNAE3_CLIENT_ROCE))
		client->ops->uninit_instance(&hdev->nic, 0);
}

static int hclgevf_register_client(struct hnae3_client *client,
				   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	return hclgevf_init_instance(hdev, client);
}

static void hclgevf_unregister_client(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_instance(hdev, client);
}
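
/* Standard PCI bring-up for the VF: enable the device, set a 64-bit DMA
 * mask, claim the regions and map BAR2, whose registers (command queue,
 * vectors, TQPs) are what io_base accesses throughout this file.
 */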
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);
	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
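
/* Main init path for the VF ae_dev. The ordering matters: PCI and
 * MSI/MSI-X setup come first, then the misc (vector 0) IRQ and command
 * queue so that the mailbox works, and only then the configuration steps
 * that talk to the PF (queue/TC info, TQP allocation, TSO, MTA filter,
 * RSS and VLAN). Each failure unwinds the earlier stages via the error
 * labels at the bottom.
 */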
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_irq_init;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_enable_tso(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to enable tso\n", ret);
		goto err_config;
	}

	/* Initialize VF's MTA */
	hdev->accept_mta_mc = true;
	ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to set mta filter mode\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_irq_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_cmd_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
	ae_dev->priv = NULL;
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_register_client,
	.uninit_client_instance = hclgevf_unregister_client,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.name = HCLGEVF_NAME,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	return hnae3_register_ae_algo(&ae_algovf);
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);