// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

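/* Per-queue statistics are read from firmware over the command queue: one
 * HCLGEVF_OPC_QUERY_RX_STATUS and one HCLGEVF_OPC_QUERY_TX_STATUS descriptor
 * per TQP, with the returned counts accumulated into the shadow tqp_stats.
 */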
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

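/* ethtool stats layout: all TX packet counters first, then all RX packet
 * counters, in TQP index order. This matches the string order emitted by
 * hclgevf_tqps_get_strings() below.
 */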
static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < hdev->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

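/* Ask the PF (via mailbox) which hardware TCs this VF may use; the reply is
 * a single-byte bitmap that is cached in hdev->hw_tc_map.
 */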
static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

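/* Queue configuration is fetched from the PF as four u16 values packed into
 * an 8-byte mailbox response:
 *   bytes 0-1: num_tqps, 2-3: rss_size_max, 4-5: num_desc, 6-7: rx_buf_len
 */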
static int hclge_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
				 i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

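/* Vector allocation: scan vector_status[] for free entries, starting after
 * the misc vector (vector 0), and fill in up to vector_num vector_info
 * entries. The return value is the number actually allocated, which may be
 * fewer than requested.
 */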
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

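/* The RSS indirection table is programmed in chunks: each descriptor carries
 * HCLGEVF_RSS_CFG_TBL_SIZE entries starting at start_table_index, so
 * HCLGEVF_RSS_CFG_TBL_NUM commands cover the whole table.
 */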
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

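/* Per-TC RSS mode: every TC shares the same queue span. The hardware is
 * given the span as log2(roundup_pow_of_two(rss_size)) and a queue offset
 * of rss_size * tc.
 */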
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			     (tc_valid[i] & 0x1));
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

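/* The hash key is read back in up to three descriptors of
 * HCLGEVF_RSS_HASH_KEY_NUM bytes each; the final chunk carries whatever
 * remains of HCLGEVF_RSS_KEY_SIZE. With a NULL key only the hash algorithm
 * word is queried, and with both key and hash NULL nothing is sent.
 */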
static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	int lkup_times = key ? 3 : 1;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;
	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

/* Note: key and hfunc are ignored here; only the indirection table can be
 * reconfigured from the VF side.
 */
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

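/* Ring-to-vector (un)mapping is relayed to the PF over the mailbox. Each
 * ring chain node consumes HCLGEVF_RING_NODE_VARIABLE_NUM message bytes
 * (ring type, tqp index, GL index), and msg[2] holds the node count; when a
 * message fills up it is flushed and a fresh descriptor is started.
 */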
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
#define HCLGEVF_RING_NODE_VARIABLE_NUM 3
#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM 3
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i, vector_id;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	type = en ?
		HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	req->msg[0] = type;
	req->msg[1] = vector_id; /* vector_id should be id in VF */

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		i++;
		/* msg[2] is cause num */
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
			hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
			node->tqp_index;
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] =
			hnae_get_field(node->int_gl_idx,
				       HNAE3_RING_GL_IDX_M,
				       HNAE3_RING_GL_IDX_S);

		if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
			  HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
			 HCLGEVF_RING_NODE_VARIABLE_NUM) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	if (i > 0) {
		req->msg[2] = i;

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
	struct hnae3_handle *handle,
	int vector,
	struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_free_vector(hdev, vector);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg[2] = {0};

	msg[0] = en;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
				    msg, 1, false, NULL, 0);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

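/* A MAC change sends both addresses to the PF in one mailbox message (new
 * MAC in bytes 0-5, current MAC in bytes 6-11), presumably so the PF can
 * drop the stale unicast entry. The first configuration uses the UC_ADD
 * subcode since there is no previous entry to modify.
 */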
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      false, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

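/* VLAN filter mailbox layout: byte 0 is the kill flag, bytes 1-2 the VLAN
 * id, bytes 3-4 the protocol. Only 802.1Q and VLAN ids 0-4095 are accepted.
 */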
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
			     NULL, 0);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

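/* The service and mailbox tasks use state bits as schedule guards so that
 * each work item is queued at most once until it actually runs.
 */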
static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	/* schedule the VF mailbox service task, if not already scheduled */
	hclgevf_mbx_task_schedule(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclge_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector =
		hdev->vector_status[hdev->num_msi_used];

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

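/* Default RSS setup: spread the indirection table round-robin across
 * rss_size_max queues, then program the table and the per-TC mode.
 */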
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other vlan config(like, VLAN TX/RX offload) would also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < hdev->num_tqps; i++) {
		/* Ring disable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

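/* Client instances: KNIC/UNIC clients initialize the nic handle; a RoCE
 * client is initialized only when both nic and roce clients are registered
 * and the device supports RoCE.
 */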
static int hclgevf_init_instance(struct hclgevf_dev *hdev,
				 struct hnae3_client *client)
{
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;
		break;
	case HNAE3_CLIENT_ROCE:
		hdev->roce_client = client;
		hdev->roce.client = client;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
				    struct hnae3_client *client)
{
	/* un-init roce, if it exists */
	if (hdev->roce_client)
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);

	/* un-init nic/unic, if this was not called by roce client */
	if ((client->ops->uninit_instance) &&
	    (client->type != HNAE3_CLIENT_ROCE))
		client->ops->uninit_instance(&hdev->nic, 0);
}

static int hclgevf_register_client(struct hnae3_client *client,
				   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	return hclgevf_init_instance(hdev, client);
}

static void hclgevf_unregister_client(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_instance(hdev, client);
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);
	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_irq_init;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize VF's MTA */
	hdev->accept_mta_mc = true;
	ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to set mta filter mode\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_irq_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_cmd_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *free_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*free_tqps = 0;
	*max_rss_size = hdev->rss_size_max;
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_register_client,
	.uninit_client_instance = hclgevf_unregister_client,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.name = HCLGEVF_NAME,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	return hnae3_register_ae_algo(&ae_algovf);
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);