// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

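/* Note: the VF driver embeds the hnae3 handle inside struct hclgevf_dev (as
 * its "nic" member), so the enclosing device can be recovered from any nic
 * handle with container_of(), as the helper below does.
 */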
static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

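/* Per-queue statistics live in hardware and are read back one firmware
 * command at a time: for each TQP the driver issues a QUERY_RX_STATUS and a
 * QUERY_TX_STATUS descriptor and accumulates the returned packet counts into
 * the shadow tqp_stats.
 */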
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

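/* The VF cannot read most device configuration directly; it asks the PF over
 * the mailbox instead. hclgevf_send_mbx_msg() takes a message code, subcode,
 * payload, and (for synchronous requests) a response buffer; the helpers
 * below are thin wrappers around that request/response pattern.
 */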
static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

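/* The QINFO response is an 8-byte buffer packing four u16 fields; the
 * memcpy() offsets below imply the layout:
 *   bytes 0-1: num_tqps, 2-3: rss_size_max, 4-5: num_desc, 6-7: rx_buf_len.
 */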
static int hclge_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	/* if this is an ongoing reset then we need to re-allocate the TQPs
	 * since we cannot assume we would get same number of TQPs back from PF
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, hdev->htqp);

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

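/* The queue budget below is split evenly across the enabled TCs and clamped
 * by the PF-provided RSS limit. As an illustration (numbers are hypothetical):
 * with num_tqps = 16, rss_size_max = 16 and 2 enabled TCs, rss_size =
 * min(16, 16 / 2) = 8 and kinfo->num_tqps = min(8 * 2, 16) = 16.
 */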
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	/* if this is an ongoing reset then we need to re-allocate the hnae
	 * queues as well since the number of TQPs from PF might have changed.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		devm_kfree(&hdev->pdev->dev, kinfo->tqp);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *rhandle = &hdev->roce;
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *rclient;
	struct hnae3_client *client;

	client = handle->client;
	rclient = hdev->roce_client;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		if (rclient && rclient->ops->link_status_change)
			rclient->ops->link_status_change(rhandle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

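/* Vector 0 is reserved for the misc/mailbox interrupt, so ring vectors are
 * handed out starting from HCLGEVF_MISC_VECTOR_NUM + 1. A slot is considered
 * free while its status is HCLGEVF_INVALID_VPORT.
 */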
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			     (tc_valid[i] & 0x1));
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

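/* The RSS hash key does not fit in a single command descriptor, so it is
 * read back in HCLGEVF_RSS_HASH_KEY_NUM-byte chunks; three lookups appear to
 * cover the whole HCLGEVF_RSS_KEY_SIZE-byte key, with the final chunk
 * truncated to whatever remains.
 */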
static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	int lkup_times = key ? 3 : 1;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;
	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

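/* Ring-to-vector (un)mapping is also a mailbox operation. Each ring in the
 * chain contributes HCLGE_MBX_RING_NODE_VARIABLE_NUM bytes (ring type, queue
 * index, GL index) after the basic message header, and the request is
 * flushed to the PF whenever the message buffer fills up or the chain ends.
 */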
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
					HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
				hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
							  HNAE3_RING_GL_IDX_M,
							  HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_free_vector(hdev, vector);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				     bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

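/* MTA is the multicast table array used for imperfect multicast filtering.
 * The PF reports which bits of the MAC address select the table index (the
 * "mta type"); the VF caches that so hclgevf_get_mac_addr_to_mta_index() can
 * derive indices locally.
 */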
static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
	u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				   HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
				   NULL, 0, true, &resp_msg, sizeof(u8));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read mta type fail, ret=%d.\n", ret);
		return ret;
	}

	if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
		dev_err(&hdev->pdev->dev,
			"Read mta type invalid, resp=%d.\n", resp_msg);
		return -EINVAL;
	}

	hdev->mta_mac_sel_type = resp_msg;

	return 0;
}

static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
					     const u8 *addr)
{
	u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
	u16 high_val = addr[1] | (addr[0] << 8);

	return (high_val >> rsh) & 0xfff;
}

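/* The MTA bitmap is pushed to the PF in 13-byte chunks: byte 0 carries the
 * chunk index in its low 7 bits (and, per the upstream driver, an
 * end-of-table flag in the top bit), followed by
 * HCLGEVF_MTA_STATUS_MSG_SIZE bytes of bitmap.
 */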
static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
					unsigned long *status)
{
#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
#define HCLGEVF_MTA_STATUS_MSG_BITS \
			(HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
			(HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
	u16 tbl_cnt;
	u16 tbl_idx;
	u8 msg_cnt;
	u8 msg_idx;
	int ret;

	msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
			       HCLGEVF_MTA_STATUS_MSG_BITS);
	tbl_idx = 0;
	msg_idx = 0;
	while (msg_cnt--) {
		u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
		u8 *p = &msg[1];
		u8 msg_ofs;
		u8 msg_bit;

		memset(msg, 0, sizeof(msg));

		/* set index field */
		msg[0] = 0x7F & msg_idx;

		/* set end flag field */
		if (msg_cnt == 0) {
			msg[0] |= 0x80;
			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
		} else {
			tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
		}

		/* set status field */
		msg_ofs = 0;
		msg_bit = 0;
		while (tbl_cnt--) {
			if (test_bit(tbl_idx, status))
				p[msg_ofs] |= BIT(msg_bit);

			tbl_idx++;

			msg_bit++;
			if (msg_bit == BITS_PER_BYTE) {
				msg_bit = 0;
				msg_ofs++;
			}
		}

		/* ready to send message */
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
					   HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
					   msg, sizeof(msg), false, NULL, 0);
		if (ret)
			break;

		msg_idx++;
	}

	return ret;
}

static int hclgevf_update_mta_status(struct hnae3_handle *handle)
{
	unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct net_device *netdev = hdev->nic.kinfo.netdev;
	struct netdev_hw_addr *ha;
	u16 tbl_idx;

	/* clear status */
	memset(mta_status, 0, sizeof(mta_status));

	/* update status from mc addr list */
	netdev_for_each_mc_addr(ha, netdev) {
		tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
		set_bit(tbl_idx, mta_status);
	}

	return hclgevf_do_update_mta_status(hdev, mta_status);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before send queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return;

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
			     2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	return client->ops->reset_notify(handle, type);
}

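/* Reset completion is detected by polling the HCLGEVF_FUN_RST_ING register;
 * with HCLGEVF_RESET_WAIT_CNT (20) polls of HCLGEVF_RESET_WAIT_MS (500 ms)
 * each, this bounds the wait at roughly 10 seconds before giving up.
 */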
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS	500
#define HCLGEVF_RESET_WAIT_CNT	20
	u32 val, cnt = 0;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
	while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
	       (cnt < HCLGEVF_RESET_WAIT_CNT)) {
		msleep(HCLGEVF_RESET_WAIT_MS);
		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
		cnt++;
	}

	/* hardware completion status should be available by this time */
	if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "couldn't get reset done status from h/w, timeout!\n");
		return -EBUSY;
	}

	/* we will wait a bit more to let reset of the stack to complete. This
	 * might happen in case reset assertion was made by PF. Yes, this also
	 * means we might end up waiting bit more even for VF reset.
	 */
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	/* re-initialize the hclge device */
	ret = hclgevf_init_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

	return 0;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	int ret;

	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);

		dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
		hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

		rtnl_unlock();
		return ret;
	}

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

	/* bring up the nic to enable TX/RX again */
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

	rtnl_unlock();

	return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
	int status;
	u8 respmsg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
				      0, false, &respmsg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF reset request to PF failed(=%d)\n", status);

	return status;
}

static void hclgevf_reset_event(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	handle->reset_level = HNAE3_VF_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	handle->last_reset_time = jiffies;
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

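/* The *_task_schedule() helpers below are guarded by state bits so that each
 * task is queued at most once and is not re-queued while its handler is
 * still running.
 */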
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		ret = hclgevf_reset(hdev);
		if (ret)
			dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout due to
		 *    a. IMP was earlier reset and our TX got choked down and
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There's no clear way to detect above cases than to
		 * react to the response of PF for this reset request. PF will
		 * ack the 1b and 2. cases but we will not get any intimation
		 * about 1a from PF as cmdq would be in unreliable state i.e.
		 * mailbox communication between PF and VF would be broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is screwed
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			/* request PF for resetting this VF via mailbox */
			ret = hclgevf_do_reset(hdev);
			if (ret)
				dev_warn(&hdev->pdev->dev,
					 "VF rst fail, stack will call\n");
		}
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

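/* Misc (vector 0) interrupt flow: mask the vector, decode the event source,
 * handle the mailbox if that is what fired, clear the source register, then
 * unmask. Unknown sources are only logged at debug level and re-enabled.
 */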
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	hclgevf_mbx_handler(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclge_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev = ae_dev->priv;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector =
		hdev->vector_status[hdev->num_msi_used];

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < hdev->num_tqps; i++) {
		/* ring disable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	hclgevf_update_link_status(hdev, 0);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return;

	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	/* if this is an ongoing reset then skip this initialization */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_instance(struct hclgevf_dev *hdev,
				 struct hnae3_client *client)
{
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
				    struct hnae3_client *client)
{
	/* un-init roce, if it exists */
	if (hdev->roce_client)
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);

	/* un-init nic/unic, if this was not called by roce client */
	if ((client->ops->uninit_instance) &&
	    (client->type != HNAE3_CLIENT_ROCE))
		client->ops->uninit_instance(&hdev->nic, 0);
}

static int hclgevf_register_client(struct hnae3_client *client,
				   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	return hclgevf_init_instance(hdev, client);
}

static void hclgevf_unregister_client(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_instance(hdev, client);
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	/* check if we need to skip initialization of pci. This will happen if
	 * device is undergoing VF reset. Otherwise, we would need to
	 * re-initialize pci interface again i.e. when device is not going
	 * through *any* reset or actually undergoing full reset.
	 */
	if (hclgevf_dev_ongoing_reset(hdev))
		return 0;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	/* check if device is on-going full reset(i.e. pcie as well) */
	if (hclgevf_dev_ongoing_full_reset(hdev)) {
		dev_warn(&pdev->dev, "device is going full reset\n");
		hclgevf_uninit_hdev(hdev);
	}

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_irq_init;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize mta type for this VF */
	ret = hclgevf_cfg_func_mta_type(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize MTA type\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_irq_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret)
		dev_err(&pdev->dev, "hclge device initialization failed\n");

	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 */
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *free_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*free_tqps = 0;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_register_client,
	.uninit_client_instance = hclgevf_unregister_client,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.update_mta_status = hclgevf_update_mta_status,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.name = HCLGEVF_NAME,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);