/* drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c */
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

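/* Query the accumulated RX/TX packet counters of every exposed TQP (task
 * queue pair) from firmware and fold them into the per-queue software
 * counters. Each query is a single command descriptor carrying the queue
 * index in data[0]; the packet count comes back in data[1].
 */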
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_queue *queue;
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	/* only the TQPs actually handed to the stack (kinfo->num_tqps) have
	 * entries in kinfo->tqp[]
	 */
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = kinfo->tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	/* use the same bound for both loops so the buffer layout matches
	 * what hclgevf_tqps_get_sset_count() and the strings report
	 */
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	/* one TX and one RX packet counter per exposed TQP */
	return handle->kinfo.num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to update TQP stats, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d\n",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d\n",
			status);
		return status;
	}

	/* the PF's reply packs four u16 fields back to back */
	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

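/* Build the kNIC view of this VF from the PF-provided configuration: count
 * the TCs enabled in hw_tc_map, size the per-TC RSS group so every enabled
 * TC gets the same number of queues, and publish the resulting TQP pointers
 * through the hnae3 handle.
 */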
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	/* this relies on the PF always reporting at least one enabled TC,
	 * otherwise the division below would be by zero
	 */
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF\n", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	/* only bother the stack when the state actually changed */
	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

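/* Hand out up to vector_num unused MSI/MSI-X vectors to the caller. Vector 0
 * is reserved for the misc (mailbox) interrupt, so the search starts just
 * after it; ring vector i uses the register block at HCLGEVF_VECTOR_REG_BASE
 * + (i - 1) * HCLGEVF_VECTOR_REG_OFFSET, the "- 1" skipping the misc
 * vector's separate register space. Returns the number of vectors actually
 * allocated, which may be fewer than requested.
 */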
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

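/* Push the shadow RSS indirection table to hardware. The table is written
 * in chunks of HCLGEVF_RSS_CFG_TBL_SIZE entries per command descriptor,
 * each chunk tagged with the starting table index it covers.
 */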
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

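/* Configure the per-TC RSS mode: every enabled TC gets a block of rss_size
 * queues at offset rss_size * tc. The block size is programmed as
 * ilog2(roundup_pow_of_two(rss_size)), so the hardware apparently expects a
 * power-of-two exponent rather than a raw queue count.
 */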
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			     (tc_valid[i] & 0x1));
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

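/* Read the RSS hash algorithm and, optionally, the hash key back from
 * hardware. The key spans three descriptors of HCLGEVF_RSS_HASH_KEY_NUM
 * bytes each, selected by the key-offset field in hash_config; the last
 * chunk holds whatever remains of the HCLGEVF_RSS_KEY_SIZE-byte key.
 */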
static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
				  u8 *key)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int lkup_times;
	int key_offset;
	int key_size;
	int status;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	/* three lookups to fetch the whole key, one for just the hash
	 * algorithm, none if the caller wants neither
	 */
	lkup_times = key ? 3 : (hash ? 1 : 0);

	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     true);
		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"failed to get hardware RSS cfg, status = %d\n",
				status);
			return status;
		}

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		if (key)
			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
			       req->hash_key,
			       key_size);
	}

	if (hash) {
		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
			*hash = ETH_RSS_HASH_TOP;
		else
			*hash = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	/* only the indirection table is programmable here; the key and hash
	 * function arguments are accepted but not applied
	 */

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

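/* Ask the PF, via the mailbox, to map or unmap a chain of rings onto a
 * vector. msg[0] is the map/unmap opcode, msg[1] the VF-local vector id and
 * msg[2] the number of rings described in this message; each ring then
 * occupies HCLGEVF_RING_NODE_VARIABLE_NUM bytes (ring type, queue index,
 * GL index). Chains that do not fit in one message are sent in batches.
 */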
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
#define HCLGEVF_RING_NODE_VARIABLE_NUM		3
#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM	3
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i, vector_id;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	type = en ?
		HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
	req->msg[0] = type;
	req->msg[1] = vector_id; /* vector_id should be id in VF */

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		i++;
		/* msg[2] carries the ring count; ring data starts at msg[3] */
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
			hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
			node->tqp_index;
		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 2] =
			hnae_get_field(node->int_gl_idx,
				       HNAE3_RING_GL_IDX_M,
				       HNAE3_RING_GL_IDX_S);

		if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
			  HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
			 HCLGEVF_RING_NODE_VARIABLE_NUM) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	if (i > 0) {
		req->msg[2] = i;

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	/* "vector" is the Linux IRQ number handed out by hclgevf_get_vector();
	 * convert it back to a vector index before releasing the slot
	 */
	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_cmd_set_promisc_mode(hdev, en);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status =%d.\n", status);

	return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

	return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hnae3_queue *queue;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = kinfo->tqp[i];
		tqp = container_of(queue, struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg[2] = {0};

	msg[0] = en;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
				    msg, 1, false, NULL, 0);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

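/* Ask the PF to replace this VF's unicast MAC address. The mailbox payload
 * carries the new address followed by the old one so the PF can retire the
 * stale filter entry; the local shadow copy is updated only if the message
 * was sent successfully.
 */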
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      HCLGE_MBX_MAC_VLAN_UC_MODIFY,
				      msg_data, ETH_ALEN * 2,
				      false, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	/* payload: 1 byte kill flag, 2 bytes VLAN id, 2 bytes protocol */
	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
			     NULL, 0);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. The PF should be able to push
	 * such updates to the VF in the future, at which point this polling
	 * can be removed.
	 */
	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

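/* Check the vector-0 event source register for a mailbox (CMDQ RX) event.
 * On a hit, *clearval is set to the register value with the mailbox bit
 * cleared; the interrupt handler later writes this value back through
 * hclgevf_clear_event_cause(), presumably to acknowledge the event.
 */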
static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return true;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

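/* Vector-0 interrupt handler: mask the misc vector, schedule the mailbox
 * service task if a mailbox event is pending, clear the event cause and
 * unmask the vector again. Events from unknown sources are not cleared;
 * the handler simply re-enables the vector.
 */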
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	if (!hclgevf_check_event_cause(hdev, &clearval))
		goto skip_sched;

	/* schedule the VF mailbox service task, if not already scheduled */
	hclgevf_mbx_task_schedule(hdev);

	hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

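/* Fill in the RoCE client's view of this device from the NIC handle. Setup
 * fails unless at least HCLGEVF_ROCEE_VECTOR_NUM MSI vectors are still
 * unallocated for RoCE's use.
 */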
static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector =
		hdev->vector_status[hdev->num_msi_used];

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

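/* Program the initial RSS state: spread the indirection table round-robin
 * across all rss_size_max queues, push it to hardware, then set the per-TC
 * RSS mode to match.
 */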
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	/* Initialize RSS indirection table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other VLAN configuration (e.g. VLAN TX/RX offload) could also be
	 * added here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* ring enable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, true);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, queue_id;

	/* use the same bound as hclgevf_ae_start(); only kinfo->num_tqps
	 * rings were handed to the stack and enabled
	 */
	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		/* Ring disable */
		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclgevf_tqp_enable(hdev, queue_id, 0, false);
	}

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

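/* Allocate MSI or MSI-X vectors. The driver asks for up to
 * HCLGEVF_MAX_VF_VECTOR_NUM but tolerates a smaller grant; vector_status[]
 * tracks ownership (HCLGEVF_INVALID_VPORT means free) and vector_irq[] maps
 * vector indices back to Linux IRQ numbers.
 */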
static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_instance(struct hclgevf_dev *hdev,
				 struct hnae3_client *client)
{
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;

		/* if a RoCE client registered first, bring it up now that
		 * the NIC instance exists
		 */
		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			return ret;
		break;
	case HNAE3_CLIENT_ROCE:
		hdev->roce_client = client;
		hdev->roce.client = client;

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				return ret;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				return ret;
		}
		break;
	}

	return 0;
}

static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
				    struct hnae3_client *client)
{
	/* un-init roce, if it exists */
	if (hdev->roce_client)
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);

	/* un-init nic/unic, if this was not called by roce client */
	if ((client->ops->uninit_instance) &&
	    (client->type != HNAE3_CLIENT_ROCE))
		client->ops->uninit_instance(&hdev->nic, 0);
}

static int hclgevf_register_client(struct hnae3_client *client,
				   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	return hclgevf_init_instance(hdev, client);
}

static void hclgevf_unregister_client(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_instance(hdev, client);
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);
	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

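/* Bring the VF device up: PCI resources first, then MSI vectors, driver
 * state and the misc (mailbox) interrupt, then the command queue, and only
 * after that the PF-dependent configuration (queues, handle, MTA, RSS,
 * VLAN). The error labels unwind in reverse order.
 */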
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_irq_init;
	}

	hclgevf_state_init(hdev);

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	/* Initialize VF's MTA */
	hdev->accept_mta_mc = true;
	ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to set mta filter mode\n", ret);
		goto err_config;
	}

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_cmd_uninit(hdev);
err_cmd_init:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_irq_init:
	hclgevf_pci_uninit(hdev);
	return ret;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_cmd_uninit(hdev);
	hclgevf_misc_irq_uninit(hdev);
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
	hclgevf_pci_uninit(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *free_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*free_tqps = 0;
	*max_rss_size = hdev->rss_size_max;
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.init_client_instance = hclgevf_register_client,
	.uninit_client_instance = hclgevf_unregister_client,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.name = HCLGEVF_NAME,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	return hnae3_register_ae_algo(&ae_algovf);
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);