1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/etherdevice.h>
5 #include <net/rtnetlink.h>
6 #include "hclgevf_cmd.h"
7 #include "hclgevf_main.h"
11 #define HCLGEVF_NAME "hclgevf"
13 static int hclgevf_init_hdev(struct hclgevf_dev
*hdev
);
14 static void hclgevf_uninit_hdev(struct hclgevf_dev
*hdev
);
15 static struct hnae3_ae_algo ae_algovf
;
17 static const struct pci_device_id ae_algovf_pci_tbl
[] = {
18 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_100G_VF
), 0},
19 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF
), 0},
20 /* required last entry */
24 MODULE_DEVICE_TABLE(pci
, ae_algovf_pci_tbl
);
26 static inline struct hclgevf_dev
*hclgevf_ae_get_hdev(
27 struct hnae3_handle
*handle
)
29 return container_of(handle
, struct hclgevf_dev
, nic
);
32 static int hclgevf_tqps_update_stats(struct hnae3_handle
*handle
)
34 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
35 struct hnae3_queue
*queue
;
36 struct hclgevf_desc desc
;
37 struct hclgevf_tqp
*tqp
;
41 for (i
= 0; i
< hdev
->num_tqps
; i
++) {
42 queue
= handle
->kinfo
.tqp
[i
];
43 tqp
= container_of(queue
, struct hclgevf_tqp
, q
);
44 hclgevf_cmd_setup_basic_desc(&desc
,
45 HCLGEVF_OPC_QUERY_RX_STATUS
,
48 desc
.data
[0] = cpu_to_le32(tqp
->index
& 0x1ff);
49 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
51 dev_err(&hdev
->pdev
->dev
,
52 "Query tqp stat fail, status = %d,queue = %d\n",
56 tqp
->tqp_stats
.rcb_rx_ring_pktnum_rcd
+=
57 le32_to_cpu(desc
.data
[1]);
59 hclgevf_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_QUERY_TX_STATUS
,
62 desc
.data
[0] = cpu_to_le32(tqp
->index
& 0x1ff);
63 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
65 dev_err(&hdev
->pdev
->dev
,
66 "Query tqp stat fail, status = %d,queue = %d\n",
70 tqp
->tqp_stats
.rcb_tx_ring_pktnum_rcd
+=
71 le32_to_cpu(desc
.data
[1]);
77 static u64
*hclgevf_tqps_get_stats(struct hnae3_handle
*handle
, u64
*data
)
79 struct hnae3_knic_private_info
*kinfo
= &handle
->kinfo
;
80 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
81 struct hclgevf_tqp
*tqp
;
85 for (i
= 0; i
< hdev
->num_tqps
; i
++) {
86 tqp
= container_of(handle
->kinfo
.tqp
[i
], struct hclgevf_tqp
, q
);
87 *buff
++ = tqp
->tqp_stats
.rcb_tx_ring_pktnum_rcd
;
89 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
90 tqp
= container_of(handle
->kinfo
.tqp
[i
], struct hclgevf_tqp
, q
);
91 *buff
++ = tqp
->tqp_stats
.rcb_rx_ring_pktnum_rcd
;
97 static int hclgevf_tqps_get_sset_count(struct hnae3_handle
*handle
, int strset
)
99 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
101 return hdev
->num_tqps
* 2;
104 static u8
*hclgevf_tqps_get_strings(struct hnae3_handle
*handle
, u8
*data
)
106 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
110 for (i
= 0; i
< hdev
->num_tqps
; i
++) {
111 struct hclgevf_tqp
*tqp
= container_of(handle
->kinfo
.tqp
[i
],
112 struct hclgevf_tqp
, q
);
113 snprintf(buff
, ETH_GSTRING_LEN
, "txq#%d_pktnum_rcd",
115 buff
+= ETH_GSTRING_LEN
;
118 for (i
= 0; i
< hdev
->num_tqps
; i
++) {
119 struct hclgevf_tqp
*tqp
= container_of(handle
->kinfo
.tqp
[i
],
120 struct hclgevf_tqp
, q
);
121 snprintf(buff
, ETH_GSTRING_LEN
, "rxq#%d_pktnum_rcd",
123 buff
+= ETH_GSTRING_LEN
;
129 static void hclgevf_update_stats(struct hnae3_handle
*handle
,
130 struct net_device_stats
*net_stats
)
132 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
135 status
= hclgevf_tqps_update_stats(handle
);
137 dev_err(&hdev
->pdev
->dev
,
138 "VF update of TQPS stats fail, status = %d.\n",
142 static int hclgevf_get_sset_count(struct hnae3_handle
*handle
, int strset
)
144 if (strset
== ETH_SS_TEST
)
146 else if (strset
== ETH_SS_STATS
)
147 return hclgevf_tqps_get_sset_count(handle
, strset
);
152 static void hclgevf_get_strings(struct hnae3_handle
*handle
, u32 strset
,
155 u8
*p
= (char *)data
;
157 if (strset
== ETH_SS_STATS
)
158 p
= hclgevf_tqps_get_strings(handle
, p
);
161 static void hclgevf_get_stats(struct hnae3_handle
*handle
, u64
*data
)
163 hclgevf_tqps_get_stats(handle
, data
);
166 static int hclgevf_get_tc_info(struct hclgevf_dev
*hdev
)
171 status
= hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_GET_TCINFO
, 0, NULL
, 0,
172 true, &resp_msg
, sizeof(u8
));
174 dev_err(&hdev
->pdev
->dev
,
175 "VF request to get TC info from PF failed %d",
180 hdev
->hw_tc_map
= resp_msg
;
185 static int hclge_get_queue_info(struct hclgevf_dev
*hdev
)
187 #define HCLGEVF_TQPS_RSS_INFO_LEN 8
188 u8 resp_msg
[HCLGEVF_TQPS_RSS_INFO_LEN
];
191 status
= hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_GET_QINFO
, 0, NULL
, 0,
193 HCLGEVF_TQPS_RSS_INFO_LEN
);
195 dev_err(&hdev
->pdev
->dev
,
196 "VF request to get tqp info from PF failed %d",
201 memcpy(&hdev
->num_tqps
, &resp_msg
[0], sizeof(u16
));
202 memcpy(&hdev
->rss_size_max
, &resp_msg
[2], sizeof(u16
));
203 memcpy(&hdev
->num_desc
, &resp_msg
[4], sizeof(u16
));
204 memcpy(&hdev
->rx_buf_len
, &resp_msg
[6], sizeof(u16
));
209 static int hclgevf_alloc_tqps(struct hclgevf_dev
*hdev
)
211 struct hclgevf_tqp
*tqp
;
214 /* if this is on going reset then we need to re-allocate the TPQs
215 * since we cannot assume we would get same number of TPQs back from PF
217 if (hclgevf_dev_ongoing_reset(hdev
))
218 devm_kfree(&hdev
->pdev
->dev
, hdev
->htqp
);
220 hdev
->htqp
= devm_kcalloc(&hdev
->pdev
->dev
, hdev
->num_tqps
,
221 sizeof(struct hclgevf_tqp
), GFP_KERNEL
);
227 for (i
= 0; i
< hdev
->num_tqps
; i
++) {
228 tqp
->dev
= &hdev
->pdev
->dev
;
231 tqp
->q
.ae_algo
= &ae_algovf
;
232 tqp
->q
.buf_size
= hdev
->rx_buf_len
;
233 tqp
->q
.desc_num
= hdev
->num_desc
;
234 tqp
->q
.io_base
= hdev
->hw
.io_base
+ HCLGEVF_TQP_REG_OFFSET
+
235 i
* HCLGEVF_TQP_REG_SIZE
;
243 static int hclgevf_knic_setup(struct hclgevf_dev
*hdev
)
245 struct hnae3_handle
*nic
= &hdev
->nic
;
246 struct hnae3_knic_private_info
*kinfo
;
247 u16 new_tqps
= hdev
->num_tqps
;
252 kinfo
->num_desc
= hdev
->num_desc
;
253 kinfo
->rx_buf_len
= hdev
->rx_buf_len
;
254 for (i
= 0; i
< HCLGEVF_MAX_TC_NUM
; i
++)
255 if (hdev
->hw_tc_map
& BIT(i
))
259 = min_t(u16
, hdev
->rss_size_max
, new_tqps
/ kinfo
->num_tc
);
260 new_tqps
= kinfo
->rss_size
* kinfo
->num_tc
;
261 kinfo
->num_tqps
= min(new_tqps
, hdev
->num_tqps
);
263 /* if this is on going reset then we need to re-allocate the hnae queues
264 * as well since number of TPQs from PF might have changed.
266 if (hclgevf_dev_ongoing_reset(hdev
))
267 devm_kfree(&hdev
->pdev
->dev
, kinfo
->tqp
);
269 kinfo
->tqp
= devm_kcalloc(&hdev
->pdev
->dev
, kinfo
->num_tqps
,
270 sizeof(struct hnae3_queue
*), GFP_KERNEL
);
274 for (i
= 0; i
< kinfo
->num_tqps
; i
++) {
275 hdev
->htqp
[i
].q
.handle
= &hdev
->nic
;
276 hdev
->htqp
[i
].q
.tqp_index
= i
;
277 kinfo
->tqp
[i
] = &hdev
->htqp
[i
].q
;
283 static void hclgevf_request_link_info(struct hclgevf_dev
*hdev
)
288 status
= hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_GET_LINK_STATUS
, 0, NULL
,
289 0, false, &resp_msg
, sizeof(u8
));
291 dev_err(&hdev
->pdev
->dev
,
292 "VF failed to fetch link status(%d) from PF", status
);
295 void hclgevf_update_link_status(struct hclgevf_dev
*hdev
, int link_state
)
297 struct hnae3_handle
*rhandle
= &hdev
->roce
;
298 struct hnae3_handle
*handle
= &hdev
->nic
;
299 struct hnae3_client
*rclient
;
300 struct hnae3_client
*client
;
302 client
= handle
->client
;
303 rclient
= hdev
->roce_client
;
305 if (link_state
!= hdev
->hw
.mac
.link
) {
306 client
->ops
->link_status_change(handle
, !!link_state
);
307 if (rclient
&& rclient
->ops
->link_status_change
)
308 rclient
->ops
->link_status_change(rhandle
, !!link_state
);
309 hdev
->hw
.mac
.link
= link_state
;
313 static int hclgevf_set_handle_info(struct hclgevf_dev
*hdev
)
315 struct hnae3_handle
*nic
= &hdev
->nic
;
318 nic
->ae_algo
= &ae_algovf
;
319 nic
->pdev
= hdev
->pdev
;
320 nic
->numa_node_mask
= hdev
->numa_node_mask
;
321 nic
->flags
|= HNAE3_SUPPORT_VF
;
323 if (hdev
->ae_dev
->dev_type
!= HNAE3_DEV_KNIC
) {
324 dev_err(&hdev
->pdev
->dev
, "unsupported device type %d\n",
325 hdev
->ae_dev
->dev_type
);
329 ret
= hclgevf_knic_setup(hdev
);
331 dev_err(&hdev
->pdev
->dev
, "VF knic setup failed %d\n",
336 static void hclgevf_free_vector(struct hclgevf_dev
*hdev
, int vector_id
)
338 if (hdev
->vector_status
[vector_id
] == HCLGEVF_INVALID_VPORT
) {
339 dev_warn(&hdev
->pdev
->dev
,
340 "vector(vector_id %d) has been freed.\n", vector_id
);
344 hdev
->vector_status
[vector_id
] = HCLGEVF_INVALID_VPORT
;
345 hdev
->num_msi_left
+= 1;
346 hdev
->num_msi_used
-= 1;
349 static int hclgevf_get_vector(struct hnae3_handle
*handle
, u16 vector_num
,
350 struct hnae3_vector_info
*vector_info
)
352 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
353 struct hnae3_vector_info
*vector
= vector_info
;
357 vector_num
= min(hdev
->num_msi_left
, vector_num
);
359 for (j
= 0; j
< vector_num
; j
++) {
360 for (i
= HCLGEVF_MISC_VECTOR_NUM
+ 1; i
< hdev
->num_msi
; i
++) {
361 if (hdev
->vector_status
[i
] == HCLGEVF_INVALID_VPORT
) {
362 vector
->vector
= pci_irq_vector(hdev
->pdev
, i
);
363 vector
->io_addr
= hdev
->hw
.io_base
+
364 HCLGEVF_VECTOR_REG_BASE
+
365 (i
- 1) * HCLGEVF_VECTOR_REG_OFFSET
;
366 hdev
->vector_status
[i
] = 0;
367 hdev
->vector_irq
[i
] = vector
->vector
;
376 hdev
->num_msi_left
-= alloc
;
377 hdev
->num_msi_used
+= alloc
;
382 static int hclgevf_get_vector_index(struct hclgevf_dev
*hdev
, int vector
)
386 for (i
= 0; i
< hdev
->num_msi
; i
++)
387 if (vector
== hdev
->vector_irq
[i
])
393 static u32
hclgevf_get_rss_key_size(struct hnae3_handle
*handle
)
395 return HCLGEVF_RSS_KEY_SIZE
;
398 static u32
hclgevf_get_rss_indir_size(struct hnae3_handle
*handle
)
400 return HCLGEVF_RSS_IND_TBL_SIZE
;
403 static int hclgevf_set_rss_indir_table(struct hclgevf_dev
*hdev
)
405 const u8
*indir
= hdev
->rss_cfg
.rss_indirection_tbl
;
406 struct hclgevf_rss_indirection_table_cmd
*req
;
407 struct hclgevf_desc desc
;
411 req
= (struct hclgevf_rss_indirection_table_cmd
*)desc
.data
;
413 for (i
= 0; i
< HCLGEVF_RSS_CFG_TBL_NUM
; i
++) {
414 hclgevf_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_RSS_INDIR_TABLE
,
416 req
->start_table_index
= i
* HCLGEVF_RSS_CFG_TBL_SIZE
;
417 req
->rss_set_bitmap
= HCLGEVF_RSS_SET_BITMAP_MSK
;
418 for (j
= 0; j
< HCLGEVF_RSS_CFG_TBL_SIZE
; j
++)
420 indir
[i
* HCLGEVF_RSS_CFG_TBL_SIZE
+ j
];
422 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
424 dev_err(&hdev
->pdev
->dev
,
425 "VF failed(=%d) to set RSS indirection table\n",
434 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev
*hdev
, u16 rss_size
)
436 struct hclgevf_rss_tc_mode_cmd
*req
;
437 u16 tc_offset
[HCLGEVF_MAX_TC_NUM
];
438 u16 tc_valid
[HCLGEVF_MAX_TC_NUM
];
439 u16 tc_size
[HCLGEVF_MAX_TC_NUM
];
440 struct hclgevf_desc desc
;
445 req
= (struct hclgevf_rss_tc_mode_cmd
*)desc
.data
;
447 roundup_size
= roundup_pow_of_two(rss_size
);
448 roundup_size
= ilog2(roundup_size
);
450 for (i
= 0; i
< HCLGEVF_MAX_TC_NUM
; i
++) {
451 tc_valid
[i
] = !!(hdev
->hw_tc_map
& BIT(i
));
452 tc_size
[i
] = roundup_size
;
453 tc_offset
[i
] = rss_size
* i
;
456 hclgevf_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_RSS_TC_MODE
, false);
457 for (i
= 0; i
< HCLGEVF_MAX_TC_NUM
; i
++) {
458 hnae3_set_bit(req
->rss_tc_mode
[i
], HCLGEVF_RSS_TC_VALID_B
,
459 (tc_valid
[i
] & 0x1));
460 hnae3_set_field(req
->rss_tc_mode
[i
], HCLGEVF_RSS_TC_SIZE_M
,
461 HCLGEVF_RSS_TC_SIZE_S
, tc_size
[i
]);
462 hnae3_set_field(req
->rss_tc_mode
[i
], HCLGEVF_RSS_TC_OFFSET_M
,
463 HCLGEVF_RSS_TC_OFFSET_S
, tc_offset
[i
]);
465 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
467 dev_err(&hdev
->pdev
->dev
,
468 "VF failed(=%d) to set rss tc mode\n", status
);
473 static int hclgevf_get_rss_hw_cfg(struct hnae3_handle
*handle
, u8
*hash
,
476 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
477 struct hclgevf_rss_config_cmd
*req
;
478 int lkup_times
= key
? 3 : 1;
479 struct hclgevf_desc desc
;
484 req
= (struct hclgevf_rss_config_cmd
*)desc
.data
;
485 lkup_times
= (lkup_times
== 3) ? 3 : ((hash
) ? 1 : 0);
487 for (key_offset
= 0; key_offset
< lkup_times
; key_offset
++) {
488 hclgevf_cmd_setup_basic_desc(&desc
,
489 HCLGEVF_OPC_RSS_GENERIC_CONFIG
,
491 req
->hash_config
|= (key_offset
<< HCLGEVF_RSS_HASH_KEY_OFFSET
);
493 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
495 dev_err(&hdev
->pdev
->dev
,
496 "failed to get hardware RSS cfg, status = %d\n",
503 HCLGEVF_RSS_KEY_SIZE
- HCLGEVF_RSS_HASH_KEY_NUM
* 2;
505 key_size
= HCLGEVF_RSS_HASH_KEY_NUM
;
508 memcpy(key
+ key_offset
* HCLGEVF_RSS_HASH_KEY_NUM
,
514 if ((req
->hash_config
& 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ
)
515 *hash
= ETH_RSS_HASH_TOP
;
517 *hash
= ETH_RSS_HASH_UNKNOWN
;
523 static int hclgevf_get_rss(struct hnae3_handle
*handle
, u32
*indir
, u8
*key
,
526 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
527 struct hclgevf_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
531 for (i
= 0; i
< HCLGEVF_RSS_IND_TBL_SIZE
; i
++)
532 indir
[i
] = rss_cfg
->rss_indirection_tbl
[i
];
534 return hclgevf_get_rss_hw_cfg(handle
, hfunc
, key
);
537 static int hclgevf_set_rss(struct hnae3_handle
*handle
, const u32
*indir
,
538 const u8
*key
, const u8 hfunc
)
540 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
541 struct hclgevf_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
544 /* update the shadow RSS table with user specified qids */
545 for (i
= 0; i
< HCLGEVF_RSS_IND_TBL_SIZE
; i
++)
546 rss_cfg
->rss_indirection_tbl
[i
] = indir
[i
];
548 /* update the hardware */
549 return hclgevf_set_rss_indir_table(hdev
);
552 static int hclgevf_get_tc_size(struct hnae3_handle
*handle
)
554 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
555 struct hclgevf_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
557 return rss_cfg
->rss_size
;
560 static int hclgevf_bind_ring_to_vector(struct hnae3_handle
*handle
, bool en
,
562 struct hnae3_ring_chain_node
*ring_chain
)
564 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
565 struct hnae3_ring_chain_node
*node
;
566 struct hclge_mbx_vf_to_pf_cmd
*req
;
567 struct hclgevf_desc desc
;
572 req
= (struct hclge_mbx_vf_to_pf_cmd
*)desc
.data
;
574 for (node
= ring_chain
; node
; node
= node
->next
) {
575 int idx_offset
= HCLGE_MBX_RING_MAP_BASIC_MSG_NUM
+
576 HCLGE_MBX_RING_NODE_VARIABLE_NUM
* i
;
579 hclgevf_cmd_setup_basic_desc(&desc
,
580 HCLGEVF_OPC_MBX_VF_TO_PF
,
583 HCLGE_MBX_MAP_RING_TO_VECTOR
:
584 HCLGE_MBX_UNMAP_RING_TO_VECTOR
;
586 req
->msg
[1] = vector_id
;
589 req
->msg
[idx_offset
] =
590 hnae3_get_bit(node
->flag
, HNAE3_RING_TYPE_B
);
591 req
->msg
[idx_offset
+ 1] = node
->tqp_index
;
592 req
->msg
[idx_offset
+ 2] = hnae3_get_field(node
->int_gl_idx
,
594 HNAE3_RING_GL_IDX_S
);
597 if ((i
== (HCLGE_MBX_VF_MSG_DATA_NUM
-
598 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM
) /
599 HCLGE_MBX_RING_NODE_VARIABLE_NUM
) ||
603 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
605 dev_err(&hdev
->pdev
->dev
,
606 "Map TQP fail, status is %d.\n",
611 hclgevf_cmd_setup_basic_desc(&desc
,
612 HCLGEVF_OPC_MBX_VF_TO_PF
,
615 req
->msg
[1] = vector_id
;
622 static int hclgevf_map_ring_to_vector(struct hnae3_handle
*handle
, int vector
,
623 struct hnae3_ring_chain_node
*ring_chain
)
625 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
628 vector_id
= hclgevf_get_vector_index(hdev
, vector
);
630 dev_err(&handle
->pdev
->dev
,
631 "Get vector index fail. ret =%d\n", vector_id
);
635 return hclgevf_bind_ring_to_vector(handle
, true, vector_id
, ring_chain
);
638 static int hclgevf_unmap_ring_from_vector(
639 struct hnae3_handle
*handle
,
641 struct hnae3_ring_chain_node
*ring_chain
)
643 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
646 vector_id
= hclgevf_get_vector_index(hdev
, vector
);
648 dev_err(&handle
->pdev
->dev
,
649 "Get vector index fail. ret =%d\n", vector_id
);
653 ret
= hclgevf_bind_ring_to_vector(handle
, false, vector_id
, ring_chain
);
655 dev_err(&handle
->pdev
->dev
,
656 "Unmap ring from vector fail. vector=%d, ret =%d\n",
663 static int hclgevf_put_vector(struct hnae3_handle
*handle
, int vector
)
665 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
668 vector_id
= hclgevf_get_vector_index(hdev
, vector
);
670 dev_err(&handle
->pdev
->dev
,
671 "hclgevf_put_vector get vector index fail. ret =%d\n",
676 hclgevf_free_vector(hdev
, vector_id
);
681 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev
*hdev
,
682 bool en_uc_pmc
, bool en_mc_pmc
)
684 struct hclge_mbx_vf_to_pf_cmd
*req
;
685 struct hclgevf_desc desc
;
688 req
= (struct hclge_mbx_vf_to_pf_cmd
*)desc
.data
;
690 hclgevf_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_MBX_VF_TO_PF
, false);
691 req
->msg
[0] = HCLGE_MBX_SET_PROMISC_MODE
;
692 req
->msg
[1] = en_uc_pmc
? 1 : 0;
693 req
->msg
[2] = en_mc_pmc
? 1 : 0;
695 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
697 dev_err(&hdev
->pdev
->dev
,
698 "Set promisc mode fail, status is %d.\n", status
);
703 static void hclgevf_set_promisc_mode(struct hnae3_handle
*handle
,
704 bool en_uc_pmc
, bool en_mc_pmc
)
706 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
708 hclgevf_cmd_set_promisc_mode(hdev
, en_uc_pmc
, en_mc_pmc
);
711 static int hclgevf_tqp_enable(struct hclgevf_dev
*hdev
, int tqp_id
,
712 int stream_id
, bool enable
)
714 struct hclgevf_cfg_com_tqp_queue_cmd
*req
;
715 struct hclgevf_desc desc
;
718 req
= (struct hclgevf_cfg_com_tqp_queue_cmd
*)desc
.data
;
720 hclgevf_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_CFG_COM_TQP_QUEUE
,
722 req
->tqp_id
= cpu_to_le16(tqp_id
& HCLGEVF_RING_ID_MASK
);
723 req
->stream_id
= cpu_to_le16(stream_id
);
724 req
->enable
|= enable
<< HCLGEVF_TQP_ENABLE_B
;
726 status
= hclgevf_cmd_send(&hdev
->hw
, &desc
, 1);
728 dev_err(&hdev
->pdev
->dev
,
729 "TQP enable fail, status =%d.\n", status
);
734 static int hclgevf_get_queue_id(struct hnae3_queue
*queue
)
736 struct hclgevf_tqp
*tqp
= container_of(queue
, struct hclgevf_tqp
, q
);
741 static void hclgevf_reset_tqp_stats(struct hnae3_handle
*handle
)
743 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
744 struct hnae3_queue
*queue
;
745 struct hclgevf_tqp
*tqp
;
748 for (i
= 0; i
< hdev
->num_tqps
; i
++) {
749 queue
= handle
->kinfo
.tqp
[i
];
750 tqp
= container_of(queue
, struct hclgevf_tqp
, q
);
751 memset(&tqp
->tqp_stats
, 0, sizeof(tqp
->tqp_stats
));
755 static int hclgevf_cfg_func_mta_type(struct hclgevf_dev
*hdev
)
757 u8 resp_msg
= HCLGEVF_MTA_TYPE_SEL_MAX
;
760 ret
= hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_SET_MULTICAST
,
761 HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ
,
762 NULL
, 0, true, &resp_msg
, sizeof(u8
));
765 dev_err(&hdev
->pdev
->dev
,
766 "Read mta type fail, ret=%d.\n", ret
);
770 if (resp_msg
> HCLGEVF_MTA_TYPE_SEL_MAX
) {
771 dev_err(&hdev
->pdev
->dev
,
772 "Read mta type invalid, resp=%d.\n", resp_msg
);
776 hdev
->mta_mac_sel_type
= resp_msg
;
781 static u16
hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev
*hdev
,
784 u32 rsh
= HCLGEVF_MTA_TYPE_SEL_MAX
- hdev
->mta_mac_sel_type
;
785 u16 high_val
= addr
[1] | (addr
[0] << 8);
787 return (high_val
>> rsh
) & 0xfff;
790 static int hclgevf_do_update_mta_status(struct hclgevf_dev
*hdev
,
791 unsigned long *status
)
793 #define HCLGEVF_MTA_STATUS_MSG_SIZE 13
794 #define HCLGEVF_MTA_STATUS_MSG_BITS \
795 (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
796 #define HCLGEVF_MTA_STATUS_MSG_END_BITS \
797 (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
804 msg_cnt
= DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE
,
805 HCLGEVF_MTA_STATUS_MSG_BITS
);
809 u8 msg
[HCLGEVF_MTA_STATUS_MSG_SIZE
+ 1];
814 memset(msg
, 0, sizeof(msg
));
816 /* set index field */
817 msg
[0] = 0x7F & msg_idx
;
819 /* set end flag field */
822 tbl_cnt
= HCLGEVF_MTA_STATUS_MSG_END_BITS
;
824 tbl_cnt
= HCLGEVF_MTA_STATUS_MSG_BITS
;
827 /* set status field */
831 if (test_bit(tbl_idx
, status
))
832 p
[msg_ofs
] |= BIT(msg_bit
);
837 if (msg_bit
== BITS_PER_BYTE
) {
843 ret
= hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_SET_MULTICAST
,
844 HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE
,
845 msg
, sizeof(msg
), false, NULL
, 0);
855 static int hclgevf_update_mta_status(struct hnae3_handle
*handle
)
857 unsigned long mta_status
[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE
)];
858 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
859 struct net_device
*netdev
= hdev
->nic
.kinfo
.netdev
;
860 struct netdev_hw_addr
*ha
;
864 memset(mta_status
, 0, sizeof(mta_status
));
866 /* update status from mc addr list */
867 netdev_for_each_mc_addr(ha
, netdev
) {
868 tbl_idx
= hclgevf_get_mac_addr_to_mta_index(hdev
, ha
->addr
);
869 set_bit(tbl_idx
, mta_status
);
872 return hclgevf_do_update_mta_status(hdev
, mta_status
);
875 static void hclgevf_get_mac_addr(struct hnae3_handle
*handle
, u8
*p
)
877 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
879 ether_addr_copy(p
, hdev
->hw
.mac
.mac_addr
);
882 static int hclgevf_set_mac_addr(struct hnae3_handle
*handle
, void *p
,
885 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
886 u8
*old_mac_addr
= (u8
*)hdev
->hw
.mac
.mac_addr
;
887 u8
*new_mac_addr
= (u8
*)p
;
888 u8 msg_data
[ETH_ALEN
* 2];
892 ether_addr_copy(msg_data
, new_mac_addr
);
893 ether_addr_copy(&msg_data
[ETH_ALEN
], old_mac_addr
);
895 subcode
= is_first
? HCLGE_MBX_MAC_VLAN_UC_ADD
:
896 HCLGE_MBX_MAC_VLAN_UC_MODIFY
;
898 status
= hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_SET_UNICAST
,
899 subcode
, msg_data
, ETH_ALEN
* 2,
902 ether_addr_copy(hdev
->hw
.mac
.mac_addr
, new_mac_addr
);
907 static int hclgevf_add_uc_addr(struct hnae3_handle
*handle
,
908 const unsigned char *addr
)
910 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
912 return hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_SET_UNICAST
,
913 HCLGE_MBX_MAC_VLAN_UC_ADD
,
914 addr
, ETH_ALEN
, false, NULL
, 0);
917 static int hclgevf_rm_uc_addr(struct hnae3_handle
*handle
,
918 const unsigned char *addr
)
920 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
922 return hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_SET_UNICAST
,
923 HCLGE_MBX_MAC_VLAN_UC_REMOVE
,
924 addr
, ETH_ALEN
, false, NULL
, 0);
927 static int hclgevf_add_mc_addr(struct hnae3_handle
*handle
,
928 const unsigned char *addr
)
930 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
932 return hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_SET_MULTICAST
,
933 HCLGE_MBX_MAC_VLAN_MC_ADD
,
934 addr
, ETH_ALEN
, false, NULL
, 0);
937 static int hclgevf_rm_mc_addr(struct hnae3_handle
*handle
,
938 const unsigned char *addr
)
940 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
942 return hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_SET_MULTICAST
,
943 HCLGE_MBX_MAC_VLAN_MC_REMOVE
,
944 addr
, ETH_ALEN
, false, NULL
, 0);
947 static int hclgevf_set_vlan_filter(struct hnae3_handle
*handle
,
948 __be16 proto
, u16 vlan_id
,
951 #define HCLGEVF_VLAN_MBX_MSG_LEN 5
952 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
953 u8 msg_data
[HCLGEVF_VLAN_MBX_MSG_LEN
];
958 if (proto
!= htons(ETH_P_8021Q
))
959 return -EPROTONOSUPPORT
;
961 msg_data
[0] = is_kill
;
962 memcpy(&msg_data
[1], &vlan_id
, sizeof(vlan_id
));
963 memcpy(&msg_data
[3], &proto
, sizeof(proto
));
964 return hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_SET_VLAN
,
965 HCLGE_MBX_VLAN_FILTER
, msg_data
,
966 HCLGEVF_VLAN_MBX_MSG_LEN
, false, NULL
, 0);
969 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle
*handle
, bool enable
)
971 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
974 msg_data
= enable
? 1 : 0;
975 return hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_SET_VLAN
,
976 HCLGE_MBX_VLAN_RX_OFF_CFG
, &msg_data
,
980 static void hclgevf_reset_tqp(struct hnae3_handle
*handle
, u16 queue_id
)
982 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
986 memcpy(&msg_data
[0], &queue_id
, sizeof(queue_id
));
988 /* disable vf queue before send queue reset msg to PF */
989 ret
= hclgevf_tqp_enable(hdev
, queue_id
, 0, false);
993 hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_QUEUE_RESET
, 0, msg_data
,
997 static int hclgevf_notify_client(struct hclgevf_dev
*hdev
,
998 enum hnae3_reset_notify_type type
)
1000 struct hnae3_client
*client
= hdev
->nic_client
;
1001 struct hnae3_handle
*handle
= &hdev
->nic
;
1003 if (!client
->ops
->reset_notify
)
1006 return client
->ops
->reset_notify(handle
, type
);
1009 static int hclgevf_reset_wait(struct hclgevf_dev
*hdev
)
1011 #define HCLGEVF_RESET_WAIT_MS 500
1012 #define HCLGEVF_RESET_WAIT_CNT 20
1015 /* wait to check the hardware reset completion status */
1016 val
= hclgevf_read_dev(&hdev
->hw
, HCLGEVF_FUN_RST_ING
);
1017 while (hnae3_get_bit(val
, HCLGEVF_FUN_RST_ING_B
) &&
1018 (cnt
< HCLGEVF_RESET_WAIT_CNT
)) {
1019 msleep(HCLGEVF_RESET_WAIT_MS
);
1020 val
= hclgevf_read_dev(&hdev
->hw
, HCLGEVF_FUN_RST_ING
);
1024 /* hardware completion status should be available by this time */
1025 if (cnt
>= HCLGEVF_RESET_WAIT_CNT
) {
1026 dev_warn(&hdev
->pdev
->dev
,
1027 "could'nt get reset done status from h/w, timeout!\n");
1031 /* we will wait a bit more to let reset of the stack to complete. This
1032 * might happen in case reset assertion was made by PF. Yes, this also
1033 * means we might end up waiting bit more even for VF reset.
1040 static int hclgevf_reset_stack(struct hclgevf_dev
*hdev
)
1044 /* uninitialize the nic client */
1045 hclgevf_notify_client(hdev
, HNAE3_UNINIT_CLIENT
);
1047 /* re-initialize the hclge device */
1048 ret
= hclgevf_init_hdev(hdev
);
1050 dev_err(&hdev
->pdev
->dev
,
1051 "hclge device re-init failed, VF is disabled!\n");
1055 /* bring up the nic client again */
1056 hclgevf_notify_client(hdev
, HNAE3_INIT_CLIENT
);
1061 static int hclgevf_reset(struct hclgevf_dev
*hdev
)
1067 /* bring down the nic to stop any ongoing TX/RX */
1068 hclgevf_notify_client(hdev
, HNAE3_DOWN_CLIENT
);
1070 /* check if VF could successfully fetch the hardware reset completion
1071 * status from the hardware
1073 ret
= hclgevf_reset_wait(hdev
);
1075 /* can't do much in this situation, will disable VF */
1076 dev_err(&hdev
->pdev
->dev
,
1077 "VF failed(=%d) to fetch H/W reset completion status\n",
1080 dev_warn(&hdev
->pdev
->dev
, "VF reset failed, disabling VF!\n");
1081 hclgevf_notify_client(hdev
, HNAE3_UNINIT_CLIENT
);
1087 /* now, re-initialize the nic client and ae device*/
1088 ret
= hclgevf_reset_stack(hdev
);
1090 dev_err(&hdev
->pdev
->dev
, "failed to reset VF stack\n");
1092 /* bring up the nic to enable TX/RX again */
1093 hclgevf_notify_client(hdev
, HNAE3_UP_CLIENT
);
1100 static int hclgevf_do_reset(struct hclgevf_dev
*hdev
)
1105 status
= hclgevf_send_mbx_msg(hdev
, HCLGE_MBX_RESET
, 0, NULL
,
1106 0, false, &respmsg
, sizeof(u8
));
1108 dev_err(&hdev
->pdev
->dev
,
1109 "VF reset request to PF failed(=%d)\n", status
);
1114 static void hclgevf_reset_event(struct hnae3_handle
*handle
)
1116 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1118 dev_info(&hdev
->pdev
->dev
, "received reset request from VF enet\n");
1120 handle
->reset_level
= HNAE3_VF_RESET
;
1122 /* reset of this VF requested */
1123 set_bit(HCLGEVF_RESET_REQUESTED
, &hdev
->reset_state
);
1124 hclgevf_reset_task_schedule(hdev
);
1126 handle
->last_reset_time
= jiffies
;
1129 static u32
hclgevf_get_fw_version(struct hnae3_handle
*handle
)
1131 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1133 return hdev
->fw_version
;
1136 static void hclgevf_get_misc_vector(struct hclgevf_dev
*hdev
)
1138 struct hclgevf_misc_vector
*vector
= &hdev
->misc_vector
;
1140 vector
->vector_irq
= pci_irq_vector(hdev
->pdev
,
1141 HCLGEVF_MISC_VECTOR_NUM
);
1142 vector
->addr
= hdev
->hw
.io_base
+ HCLGEVF_MISC_VECTOR_REG_BASE
;
1143 /* vector status always valid for Vector 0 */
1144 hdev
->vector_status
[HCLGEVF_MISC_VECTOR_NUM
] = 0;
1145 hdev
->vector_irq
[HCLGEVF_MISC_VECTOR_NUM
] = vector
->vector_irq
;
1147 hdev
->num_msi_left
-= 1;
1148 hdev
->num_msi_used
+= 1;
1151 void hclgevf_reset_task_schedule(struct hclgevf_dev
*hdev
)
1153 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED
, &hdev
->state
) &&
1154 !test_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
)) {
1155 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED
, &hdev
->state
);
1156 schedule_work(&hdev
->rst_service_task
);
1160 void hclgevf_mbx_task_schedule(struct hclgevf_dev
*hdev
)
1162 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED
, &hdev
->state
) &&
1163 !test_bit(HCLGEVF_STATE_MBX_HANDLING
, &hdev
->state
)) {
1164 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED
, &hdev
->state
);
1165 schedule_work(&hdev
->mbx_service_task
);
1169 static void hclgevf_task_schedule(struct hclgevf_dev
*hdev
)
1171 if (!test_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
) &&
1172 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED
, &hdev
->state
))
1173 schedule_work(&hdev
->service_task
);
1176 static void hclgevf_deferred_task_schedule(struct hclgevf_dev
*hdev
)
1178 /* if we have any pending mailbox event then schedule the mbx task */
1179 if (hdev
->mbx_event_pending
)
1180 hclgevf_mbx_task_schedule(hdev
);
1182 if (test_bit(HCLGEVF_RESET_PENDING
, &hdev
->reset_state
))
1183 hclgevf_reset_task_schedule(hdev
);
1186 static void hclgevf_service_timer(struct timer_list
*t
)
1188 struct hclgevf_dev
*hdev
= from_timer(hdev
, t
, service_timer
);
1190 mod_timer(&hdev
->service_timer
, jiffies
+ 5 * HZ
);
1192 hclgevf_task_schedule(hdev
);
1195 static void hclgevf_reset_service_task(struct work_struct
*work
)
1197 struct hclgevf_dev
*hdev
=
1198 container_of(work
, struct hclgevf_dev
, rst_service_task
);
1201 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
))
1204 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED
, &hdev
->state
);
1206 if (test_and_clear_bit(HCLGEVF_RESET_PENDING
,
1207 &hdev
->reset_state
)) {
1208 /* PF has initmated that it is about to reset the hardware.
1209 * We now have to poll & check if harware has actually completed
1210 * the reset sequence. On hardware reset completion, VF needs to
1211 * reset the client and ae device.
1213 hdev
->reset_attempts
= 0;
1215 ret
= hclgevf_reset(hdev
);
1217 dev_err(&hdev
->pdev
->dev
, "VF stack reset failed.\n");
1218 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED
,
1219 &hdev
->reset_state
)) {
1220 /* we could be here when either of below happens:
1221 * 1. reset was initiated due to watchdog timeout due to
1222 * a. IMP was earlier reset and our TX got choked down and
1223 * which resulted in watchdog reacting and inducing VF
1224 * reset. This also means our cmdq would be unreliable.
1225 * b. problem in TX due to other lower layer(example link
1226 * layer not functioning properly etc.)
1227 * 2. VF reset might have been initiated due to some config
1230 * NOTE: Theres no clear way to detect above cases than to react
1231 * to the response of PF for this reset request. PF will ack the
1232 * 1b and 2. cases but we will not get any intimation about 1a
1233 * from PF as cmdq would be in unreliable state i.e. mailbox
1234 * communication between PF and VF would be broken.
1237 /* if we are never geting into pending state it means either:
1238 * 1. PF is not receiving our request which could be due to IMP
1241 * We cannot do much for 2. but to check first we can try reset
1242 * our PCIe + stack and see if it alleviates the problem.
1244 if (hdev
->reset_attempts
> 3) {
1245 /* prepare for full reset of stack + pcie interface */
1246 hdev
->nic
.reset_level
= HNAE3_VF_FULL_RESET
;
1248 /* "defer" schedule the reset task again */
1249 set_bit(HCLGEVF_RESET_PENDING
, &hdev
->reset_state
);
1251 hdev
->reset_attempts
++;
1253 /* request PF for resetting this VF via mailbox */
1254 ret
= hclgevf_do_reset(hdev
);
1256 dev_warn(&hdev
->pdev
->dev
,
1257 "VF rst fail, stack will call\n");
1261 clear_bit(HCLGEVF_STATE_RST_HANDLING
, &hdev
->state
);
1264 static void hclgevf_mailbox_service_task(struct work_struct
*work
)
1266 struct hclgevf_dev
*hdev
;
1268 hdev
= container_of(work
, struct hclgevf_dev
, mbx_service_task
);
1270 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING
, &hdev
->state
))
1273 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED
, &hdev
->state
);
1275 hclgevf_mbx_async_handler(hdev
);
1277 clear_bit(HCLGEVF_STATE_MBX_HANDLING
, &hdev
->state
);
1280 static void hclgevf_service_task(struct work_struct
*work
)
1282 struct hclgevf_dev
*hdev
;
1284 hdev
= container_of(work
, struct hclgevf_dev
, service_task
);
1286 /* request the link status from the PF. PF would be able to tell VF
1287 * about such updates in future so we might remove this later
1289 hclgevf_request_link_info(hdev
);
1291 hclgevf_deferred_task_schedule(hdev
);
1293 clear_bit(HCLGEVF_STATE_SERVICE_SCHED
, &hdev
->state
);
1296 static void hclgevf_clear_event_cause(struct hclgevf_dev
*hdev
, u32 regclr
)
1298 hclgevf_write_dev(&hdev
->hw
, HCLGEVF_VECTOR0_CMDQ_SRC_REG
, regclr
);
1301 static bool hclgevf_check_event_cause(struct hclgevf_dev
*hdev
, u32
*clearval
)
1305 /* fetch the events from their corresponding regs */
1306 cmdq_src_reg
= hclgevf_read_dev(&hdev
->hw
,
1307 HCLGEVF_VECTOR0_CMDQ_SRC_REG
);
1309 /* check for vector0 mailbox(=CMDQ RX) event source */
1310 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B
) & cmdq_src_reg
) {
1311 cmdq_src_reg
&= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B
);
1312 *clearval
= cmdq_src_reg
;
1316 dev_dbg(&hdev
->pdev
->dev
, "vector 0 interrupt from unknown source\n");
1321 static void hclgevf_enable_vector(struct hclgevf_misc_vector
*vector
, bool en
)
1323 writel(en
? 1 : 0, vector
->addr
);
1326 static irqreturn_t
hclgevf_misc_irq_handle(int irq
, void *data
)
1328 struct hclgevf_dev
*hdev
= data
;
1331 hclgevf_enable_vector(&hdev
->misc_vector
, false);
1332 if (!hclgevf_check_event_cause(hdev
, &clearval
))
1335 hclgevf_mbx_handler(hdev
);
1337 hclgevf_clear_event_cause(hdev
, clearval
);
1340 hclgevf_enable_vector(&hdev
->misc_vector
, true);
/* Fetch the VF's queue and TC configuration from the PF via mailbox.
 * Returns 0 on success or a negative error code.
 */
static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclge_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}
1357 static int hclgevf_alloc_hdev(struct hnae3_ae_dev
*ae_dev
)
1359 struct pci_dev
*pdev
= ae_dev
->pdev
;
1360 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
1362 hdev
= devm_kzalloc(&pdev
->dev
, sizeof(*hdev
), GFP_KERNEL
);
1367 hdev
->ae_dev
= ae_dev
;
1368 ae_dev
->priv
= hdev
;
1373 static int hclgevf_init_roce_base_info(struct hclgevf_dev
*hdev
)
1375 struct hnae3_handle
*roce
= &hdev
->roce
;
1376 struct hnae3_handle
*nic
= &hdev
->nic
;
1378 roce
->rinfo
.num_vectors
= HCLGEVF_ROCEE_VECTOR_NUM
;
1380 if (hdev
->num_msi_left
< roce
->rinfo
.num_vectors
||
1381 hdev
->num_msi_left
== 0)
1384 roce
->rinfo
.base_vector
=
1385 hdev
->vector_status
[hdev
->num_msi_used
];
1387 roce
->rinfo
.netdev
= nic
->kinfo
.netdev
;
1388 roce
->rinfo
.roce_io_base
= hdev
->hw
.io_base
;
1390 roce
->pdev
= nic
->pdev
;
1391 roce
->ae_algo
= nic
->ae_algo
;
1392 roce
->numa_node_mask
= nic
->numa_node_mask
;
1397 static int hclgevf_rss_init_hw(struct hclgevf_dev
*hdev
)
1399 struct hclgevf_rss_cfg
*rss_cfg
= &hdev
->rss_cfg
;
1402 rss_cfg
->rss_size
= hdev
->rss_size_max
;
1404 /* Initialize RSS indirect table for each vport */
1405 for (i
= 0; i
< HCLGEVF_RSS_IND_TBL_SIZE
; i
++)
1406 rss_cfg
->rss_indirection_tbl
[i
] = i
% hdev
->rss_size_max
;
1408 ret
= hclgevf_set_rss_indir_table(hdev
);
1412 return hclgevf_set_rss_tc_mode(hdev
, hdev
->rss_size_max
);
1415 static int hclgevf_init_vlan_config(struct hclgevf_dev
*hdev
)
1417 /* other vlan config(like, VLAN TX/RX offload) would also be added
1420 return hclgevf_set_vlan_filter(&hdev
->nic
, htons(ETH_P_8021Q
), 0,
1424 static int hclgevf_ae_start(struct hnae3_handle
*handle
)
1426 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1429 for (i
= 0; i
< handle
->kinfo
.num_tqps
; i
++) {
1431 queue_id
= hclgevf_get_queue_id(handle
->kinfo
.tqp
[i
]);
1433 dev_warn(&hdev
->pdev
->dev
,
1434 "Get invalid queue id, ignore it\n");
1438 hclgevf_tqp_enable(hdev
, queue_id
, 0, true);
1441 /* reset tqp stats */
1442 hclgevf_reset_tqp_stats(handle
);
1444 hclgevf_request_link_info(hdev
);
1446 clear_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
);
1447 mod_timer(&hdev
->service_timer
, jiffies
+ HZ
);
1452 static void hclgevf_ae_stop(struct hnae3_handle
*handle
)
1454 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1457 for (i
= 0; i
< hdev
->num_tqps
; i
++) {
1459 queue_id
= hclgevf_get_queue_id(handle
->kinfo
.tqp
[i
]);
1461 dev_warn(&hdev
->pdev
->dev
,
1462 "Get invalid queue id, ignore it\n");
1466 hclgevf_tqp_enable(hdev
, queue_id
, 0, false);
1469 /* reset tqp stats */
1470 hclgevf_reset_tqp_stats(handle
);
1471 del_timer_sync(&hdev
->service_timer
);
1472 cancel_work_sync(&hdev
->service_task
);
1473 clear_bit(HCLGEVF_STATE_SERVICE_SCHED
, &hdev
->state
);
1474 hclgevf_update_link_status(hdev
, 0);
1477 static void hclgevf_state_init(struct hclgevf_dev
*hdev
)
1479 /* if this is on going reset then skip this initialization */
1480 if (hclgevf_dev_ongoing_reset(hdev
))
1483 /* setup tasks for the MBX */
1484 INIT_WORK(&hdev
->mbx_service_task
, hclgevf_mailbox_service_task
);
1485 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED
, &hdev
->state
);
1486 clear_bit(HCLGEVF_STATE_MBX_HANDLING
, &hdev
->state
);
1488 /* setup tasks for service timer */
1489 timer_setup(&hdev
->service_timer
, hclgevf_service_timer
, 0);
1491 INIT_WORK(&hdev
->service_task
, hclgevf_service_task
);
1492 clear_bit(HCLGEVF_STATE_SERVICE_SCHED
, &hdev
->state
);
1494 INIT_WORK(&hdev
->rst_service_task
, hclgevf_reset_service_task
);
1496 mutex_init(&hdev
->mbx_resp
.mbx_mutex
);
1498 /* bring the device down */
1499 set_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
);
1502 static void hclgevf_state_uninit(struct hclgevf_dev
*hdev
)
1504 set_bit(HCLGEVF_STATE_DOWN
, &hdev
->state
);
1506 if (hdev
->service_timer
.function
)
1507 del_timer_sync(&hdev
->service_timer
);
1508 if (hdev
->service_task
.func
)
1509 cancel_work_sync(&hdev
->service_task
);
1510 if (hdev
->mbx_service_task
.func
)
1511 cancel_work_sync(&hdev
->mbx_service_task
);
1512 if (hdev
->rst_service_task
.func
)
1513 cancel_work_sync(&hdev
->rst_service_task
);
1515 mutex_destroy(&hdev
->mbx_resp
.mbx_mutex
);
1518 static int hclgevf_init_msi(struct hclgevf_dev
*hdev
)
1520 struct pci_dev
*pdev
= hdev
->pdev
;
1524 /* if this is on going reset then skip this initialization */
1525 if (hclgevf_dev_ongoing_reset(hdev
))
1528 hdev
->num_msi
= HCLGEVF_MAX_VF_VECTOR_NUM
;
1530 vectors
= pci_alloc_irq_vectors(pdev
, 1, hdev
->num_msi
,
1531 PCI_IRQ_MSI
| PCI_IRQ_MSIX
);
1534 "failed(%d) to allocate MSI/MSI-X vectors\n",
1538 if (vectors
< hdev
->num_msi
)
1539 dev_warn(&hdev
->pdev
->dev
,
1540 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1541 hdev
->num_msi
, vectors
);
1543 hdev
->num_msi
= vectors
;
1544 hdev
->num_msi_left
= vectors
;
1545 hdev
->base_msi_vector
= pdev
->irq
;
1547 hdev
->vector_status
= devm_kcalloc(&pdev
->dev
, hdev
->num_msi
,
1548 sizeof(u16
), GFP_KERNEL
);
1549 if (!hdev
->vector_status
) {
1550 pci_free_irq_vectors(pdev
);
1554 for (i
= 0; i
< hdev
->num_msi
; i
++)
1555 hdev
->vector_status
[i
] = HCLGEVF_INVALID_VPORT
;
1557 hdev
->vector_irq
= devm_kcalloc(&pdev
->dev
, hdev
->num_msi
,
1558 sizeof(int), GFP_KERNEL
);
1559 if (!hdev
->vector_irq
) {
1560 pci_free_irq_vectors(pdev
);
1567 static void hclgevf_uninit_msi(struct hclgevf_dev
*hdev
)
1569 struct pci_dev
*pdev
= hdev
->pdev
;
1571 pci_free_irq_vectors(pdev
);
1574 static int hclgevf_misc_irq_init(struct hclgevf_dev
*hdev
)
1578 /* if this is on going reset then skip this initialization */
1579 if (hclgevf_dev_ongoing_reset(hdev
))
1582 hclgevf_get_misc_vector(hdev
);
1584 ret
= request_irq(hdev
->misc_vector
.vector_irq
, hclgevf_misc_irq_handle
,
1585 0, "hclgevf_cmd", hdev
);
1587 dev_err(&hdev
->pdev
->dev
, "VF failed to request misc irq(%d)\n",
1588 hdev
->misc_vector
.vector_irq
);
1592 /* enable misc. vector(vector 0) */
1593 hclgevf_enable_vector(&hdev
->misc_vector
, true);
1598 static void hclgevf_misc_irq_uninit(struct hclgevf_dev
*hdev
)
1600 /* disable misc vector(vector 0) */
1601 hclgevf_enable_vector(&hdev
->misc_vector
, false);
1602 free_irq(hdev
->misc_vector
.vector_irq
, hdev
);
1603 hclgevf_free_vector(hdev
, 0);
1606 static int hclgevf_init_client_instance(struct hnae3_client
*client
,
1607 struct hnae3_ae_dev
*ae_dev
)
1609 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
1612 switch (client
->type
) {
1613 case HNAE3_CLIENT_KNIC
:
1614 hdev
->nic_client
= client
;
1615 hdev
->nic
.client
= client
;
1617 ret
= client
->ops
->init_instance(&hdev
->nic
);
1621 if (hdev
->roce_client
&& hnae3_dev_roce_supported(hdev
)) {
1622 struct hnae3_client
*rc
= hdev
->roce_client
;
1624 ret
= hclgevf_init_roce_base_info(hdev
);
1627 ret
= rc
->ops
->init_instance(&hdev
->roce
);
1632 case HNAE3_CLIENT_UNIC
:
1633 hdev
->nic_client
= client
;
1634 hdev
->nic
.client
= client
;
1636 ret
= client
->ops
->init_instance(&hdev
->nic
);
1640 case HNAE3_CLIENT_ROCE
:
1641 if (hnae3_dev_roce_supported(hdev
)) {
1642 hdev
->roce_client
= client
;
1643 hdev
->roce
.client
= client
;
1646 if (hdev
->roce_client
&& hdev
->nic_client
) {
1647 ret
= hclgevf_init_roce_base_info(hdev
);
1651 ret
= client
->ops
->init_instance(&hdev
->roce
);
1660 static void hclgevf_uninit_client_instance(struct hnae3_client
*client
,
1661 struct hnae3_ae_dev
*ae_dev
)
1663 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
1665 /* un-init roce, if it exists */
1666 if (hdev
->roce_client
)
1667 hdev
->roce_client
->ops
->uninit_instance(&hdev
->roce
, 0);
1669 /* un-init nic/unic, if this was not called by roce client */
1670 if ((client
->ops
->uninit_instance
) &&
1671 (client
->type
!= HNAE3_CLIENT_ROCE
))
1672 client
->ops
->uninit_instance(&hdev
->nic
, 0);
1675 static int hclgevf_pci_init(struct hclgevf_dev
*hdev
)
1677 struct pci_dev
*pdev
= hdev
->pdev
;
1678 struct hclgevf_hw
*hw
;
1681 /* check if we need to skip initialization of pci. This will happen if
1682 * device is undergoing VF reset. Otherwise, we would need to
1683 * re-initialize pci interface again i.e. when device is not going
1684 * through *any* reset or actually undergoing full reset.
1686 if (hclgevf_dev_ongoing_reset(hdev
))
1689 ret
= pci_enable_device(pdev
);
1691 dev_err(&pdev
->dev
, "failed to enable PCI device\n");
1695 ret
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
1697 dev_err(&pdev
->dev
, "can't set consistent PCI DMA, exiting");
1698 goto err_disable_device
;
1701 ret
= pci_request_regions(pdev
, HCLGEVF_DRIVER_NAME
);
1703 dev_err(&pdev
->dev
, "PCI request regions failed %d\n", ret
);
1704 goto err_disable_device
;
1707 pci_set_master(pdev
);
1710 hw
->io_base
= pci_iomap(pdev
, 2, 0);
1712 dev_err(&pdev
->dev
, "can't map configuration register space\n");
1714 goto err_clr_master
;
1720 pci_clear_master(pdev
);
1721 pci_release_regions(pdev
);
1723 pci_disable_device(pdev
);
1728 static void hclgevf_pci_uninit(struct hclgevf_dev
*hdev
)
1730 struct pci_dev
*pdev
= hdev
->pdev
;
1732 pci_iounmap(pdev
, hdev
->hw
.io_base
);
1733 pci_clear_master(pdev
);
1734 pci_release_regions(pdev
);
1735 pci_disable_device(pdev
);
1738 static int hclgevf_init_hdev(struct hclgevf_dev
*hdev
)
1740 struct pci_dev
*pdev
= hdev
->pdev
;
1743 /* check if device is on-going full reset(i.e. pcie as well) */
1744 if (hclgevf_dev_ongoing_full_reset(hdev
)) {
1745 dev_warn(&pdev
->dev
, "device is going full reset\n");
1746 hclgevf_uninit_hdev(hdev
);
1749 ret
= hclgevf_pci_init(hdev
);
1751 dev_err(&pdev
->dev
, "PCI initialization failed\n");
1755 ret
= hclgevf_init_msi(hdev
);
1757 dev_err(&pdev
->dev
, "failed(%d) to init MSI/MSI-X\n", ret
);
1761 hclgevf_state_init(hdev
);
1763 ret
= hclgevf_cmd_init(hdev
);
1767 ret
= hclgevf_misc_irq_init(hdev
);
1769 dev_err(&pdev
->dev
, "failed(%d) to init Misc IRQ(vector0)\n",
1771 goto err_misc_irq_init
;
1774 ret
= hclgevf_configure(hdev
);
1776 dev_err(&pdev
->dev
, "failed(%d) to fetch configuration\n", ret
);
1780 ret
= hclgevf_alloc_tqps(hdev
);
1782 dev_err(&pdev
->dev
, "failed(%d) to allocate TQPs\n", ret
);
1786 ret
= hclgevf_set_handle_info(hdev
);
1788 dev_err(&pdev
->dev
, "failed(%d) to set handle info\n", ret
);
1792 /* Initialize mta type for this VF */
1793 ret
= hclgevf_cfg_func_mta_type(hdev
);
1795 dev_err(&hdev
->pdev
->dev
,
1796 "failed(%d) to initialize MTA type\n", ret
);
1800 /* Initialize RSS for this VF */
1801 ret
= hclgevf_rss_init_hw(hdev
);
1803 dev_err(&hdev
->pdev
->dev
,
1804 "failed(%d) to initialize RSS\n", ret
);
1808 ret
= hclgevf_init_vlan_config(hdev
);
1810 dev_err(&hdev
->pdev
->dev
,
1811 "failed(%d) to initialize VLAN config\n", ret
);
1815 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME
);
1820 hclgevf_misc_irq_uninit(hdev
);
1822 hclgevf_cmd_uninit(hdev
);
1824 hclgevf_state_uninit(hdev
);
1825 hclgevf_uninit_msi(hdev
);
1827 hclgevf_pci_uninit(hdev
);
/* Tear down the device in reverse order of hclgevf_init_hdev(). */
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
}
1840 static int hclgevf_init_ae_dev(struct hnae3_ae_dev
*ae_dev
)
1842 struct pci_dev
*pdev
= ae_dev
->pdev
;
1845 ret
= hclgevf_alloc_hdev(ae_dev
);
1847 dev_err(&pdev
->dev
, "hclge device allocation failed\n");
1851 ret
= hclgevf_init_hdev(ae_dev
->priv
);
1853 dev_err(&pdev
->dev
, "hclge device initialization failed\n");
1858 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev
*ae_dev
)
1860 struct hclgevf_dev
*hdev
= ae_dev
->priv
;
1862 hclgevf_uninit_hdev(hdev
);
1863 ae_dev
->priv
= NULL
;
1866 static u32
hclgevf_get_max_channels(struct hclgevf_dev
*hdev
)
1868 struct hnae3_handle
*nic
= &hdev
->nic
;
1869 struct hnae3_knic_private_info
*kinfo
= &nic
->kinfo
;
1871 return min_t(u32
, hdev
->rss_size_max
* kinfo
->num_tc
, hdev
->num_tqps
);
1875 * hclgevf_get_channels - Get the current channels enabled and max supported.
1876 * @handle: hardware information for network interface
1877 * @ch: ethtool channels structure
1879 * We don't support separate tx and rx queues as channels. The other count
1880 * represents how many queues are being used for control. max_combined counts
1881 * how many queue pairs we can support. They may not be mapped 1 to 1 with
1882 * q_vectors since we support a lot more queue pairs than q_vectors.
1884 static void hclgevf_get_channels(struct hnae3_handle
*handle
,
1885 struct ethtool_channels
*ch
)
1887 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1889 ch
->max_combined
= hclgevf_get_max_channels(hdev
);
1890 ch
->other_count
= 0;
1892 ch
->combined_count
= hdev
->num_tqps
;
1895 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle
*handle
,
1896 u16
*free_tqps
, u16
*max_rss_size
)
1898 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1901 *max_rss_size
= hdev
->rss_size_max
;
1904 static int hclgevf_get_status(struct hnae3_handle
*handle
)
1906 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1908 return hdev
->hw
.mac
.link
;
1911 static void hclgevf_get_ksettings_an_result(struct hnae3_handle
*handle
,
1912 u8
*auto_neg
, u32
*speed
,
1915 struct hclgevf_dev
*hdev
= hclgevf_ae_get_hdev(handle
);
1918 *speed
= hdev
->hw
.mac
.speed
;
1920 *duplex
= hdev
->hw
.mac
.duplex
;
1922 *auto_neg
= AUTONEG_DISABLE
;
1925 void hclgevf_update_speed_duplex(struct hclgevf_dev
*hdev
, u32 speed
,
1928 hdev
->hw
.mac
.speed
= speed
;
1929 hdev
->hw
.mac
.duplex
= duplex
;
1932 static const struct hnae3_ae_ops hclgevf_ops
= {
1933 .init_ae_dev
= hclgevf_init_ae_dev
,
1934 .uninit_ae_dev
= hclgevf_uninit_ae_dev
,
1935 .init_client_instance
= hclgevf_init_client_instance
,
1936 .uninit_client_instance
= hclgevf_uninit_client_instance
,
1937 .start
= hclgevf_ae_start
,
1938 .stop
= hclgevf_ae_stop
,
1939 .map_ring_to_vector
= hclgevf_map_ring_to_vector
,
1940 .unmap_ring_from_vector
= hclgevf_unmap_ring_from_vector
,
1941 .get_vector
= hclgevf_get_vector
,
1942 .put_vector
= hclgevf_put_vector
,
1943 .reset_queue
= hclgevf_reset_tqp
,
1944 .set_promisc_mode
= hclgevf_set_promisc_mode
,
1945 .get_mac_addr
= hclgevf_get_mac_addr
,
1946 .set_mac_addr
= hclgevf_set_mac_addr
,
1947 .add_uc_addr
= hclgevf_add_uc_addr
,
1948 .rm_uc_addr
= hclgevf_rm_uc_addr
,
1949 .add_mc_addr
= hclgevf_add_mc_addr
,
1950 .rm_mc_addr
= hclgevf_rm_mc_addr
,
1951 .update_mta_status
= hclgevf_update_mta_status
,
1952 .get_stats
= hclgevf_get_stats
,
1953 .update_stats
= hclgevf_update_stats
,
1954 .get_strings
= hclgevf_get_strings
,
1955 .get_sset_count
= hclgevf_get_sset_count
,
1956 .get_rss_key_size
= hclgevf_get_rss_key_size
,
1957 .get_rss_indir_size
= hclgevf_get_rss_indir_size
,
1958 .get_rss
= hclgevf_get_rss
,
1959 .set_rss
= hclgevf_set_rss
,
1960 .get_tc_size
= hclgevf_get_tc_size
,
1961 .get_fw_version
= hclgevf_get_fw_version
,
1962 .set_vlan_filter
= hclgevf_set_vlan_filter
,
1963 .enable_hw_strip_rxvtag
= hclgevf_en_hw_strip_rxvtag
,
1964 .reset_event
= hclgevf_reset_event
,
1965 .get_channels
= hclgevf_get_channels
,
1966 .get_tqps_and_rss_info
= hclgevf_get_tqps_and_rss_info
,
1967 .get_status
= hclgevf_get_status
,
1968 .get_ksettings_an_result
= hclgevf_get_ksettings_an_result
,
1971 static struct hnae3_ae_algo ae_algovf
= {
1972 .ops
= &hclgevf_ops
,
1973 .pdev_id_table
= ae_algovf_pci_tbl
,
1976 static int hclgevf_init(void)
1978 pr_info("%s is initializing\n", HCLGEVF_NAME
);
1980 hnae3_register_ae_algo(&ae_algovf
);
1985 static void hclgevf_exit(void)
1987 hnae3_unregister_ae_algo(&ae_algovf
);
1989 module_init(hclgevf_init
);
1990 module_exit(hclgevf_exit
);
1992 MODULE_LICENSE("GPL");
1993 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
1994 MODULE_DESCRIPTION("HCLGEVF Driver");
1995 MODULE_VERSION(HCLGEVF_MOD_VERSION
);