1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
8 /* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
9 * receives a mailbox message from VF.
10 * @vport: pointer to struct hclge_vport
11 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
13 * @resp_status: indicate to VF whether its request success(0) or failed.
15 static int hclge_gen_resp_to_vf(struct hclge_vport
*vport
,
16 struct hclge_mbx_vf_to_pf_cmd
*vf_to_pf_req
,
18 u8
*resp_data
, u16 resp_data_len
)
20 struct hclge_mbx_pf_to_vf_cmd
*resp_pf_to_vf
;
21 struct hclge_dev
*hdev
= vport
->back
;
22 enum hclge_cmd_status status
;
23 struct hclge_desc desc
;
25 resp_pf_to_vf
= (struct hclge_mbx_pf_to_vf_cmd
*)desc
.data
;
27 if (resp_data_len
> HCLGE_MBX_MAX_RESP_DATA_SIZE
) {
28 dev_err(&hdev
->pdev
->dev
,
29 "PF fail to gen resp to VF len %d exceeds max len %d\n",
31 HCLGE_MBX_MAX_RESP_DATA_SIZE
);
34 hclge_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_MBX_PF_TO_VF
, false);
36 resp_pf_to_vf
->dest_vfid
= vf_to_pf_req
->mbx_src_vfid
;
37 resp_pf_to_vf
->msg_len
= vf_to_pf_req
->msg_len
;
39 resp_pf_to_vf
->msg
[0] = HCLGE_MBX_PF_VF_RESP
;
40 resp_pf_to_vf
->msg
[1] = vf_to_pf_req
->msg
[0];
41 resp_pf_to_vf
->msg
[2] = vf_to_pf_req
->msg
[1];
42 resp_pf_to_vf
->msg
[3] = (resp_status
== 0) ? 0 : 1;
44 if (resp_data
&& resp_data_len
> 0)
45 memcpy(&resp_pf_to_vf
->msg
[4], resp_data
, resp_data_len
);
47 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
49 dev_err(&hdev
->pdev
->dev
,
50 "PF failed(=%d) to send response to VF\n", status
);
55 static int hclge_send_mbx_msg(struct hclge_vport
*vport
, u8
*msg
, u16 msg_len
,
56 u16 mbx_opcode
, u8 dest_vfid
)
58 struct hclge_mbx_pf_to_vf_cmd
*resp_pf_to_vf
;
59 struct hclge_dev
*hdev
= vport
->back
;
60 enum hclge_cmd_status status
;
61 struct hclge_desc desc
;
63 resp_pf_to_vf
= (struct hclge_mbx_pf_to_vf_cmd
*)desc
.data
;
65 hclge_cmd_setup_basic_desc(&desc
, HCLGEVF_OPC_MBX_PF_TO_VF
, false);
67 resp_pf_to_vf
->dest_vfid
= dest_vfid
;
68 resp_pf_to_vf
->msg_len
= msg_len
;
69 resp_pf_to_vf
->msg
[0] = mbx_opcode
;
71 memcpy(&resp_pf_to_vf
->msg
[1], msg
, msg_len
);
73 status
= hclge_cmd_send(&hdev
->hw
, &desc
, 1);
75 dev_err(&hdev
->pdev
->dev
,
76 "PF failed(=%d) to send mailbox message to VF\n",
82 static int hclge_inform_reset_assert_to_vf(struct hclge_vport
*vport
)
87 dest_vfid
= (u8
)vport
->vport_id
;
89 /* send this requested info to VF */
90 return hclge_send_mbx_msg(vport
, msg_data
, sizeof(u8
),
91 HCLGE_MBX_ASSERTING_RESET
, dest_vfid
);
94 static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node
*head
)
96 struct hnae3_ring_chain_node
*chain_tmp
, *chain
;
101 chain_tmp
= chain
->next
;
107 /* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
109 * msg[1]: <not relevant to this function>
111 * msg[3]: first ring type (TX|RX)
112 * msg[4]: first tqp id
113 * msg[5] ~ msg[14]: other ring type and tqp id
115 static int hclge_get_ring_chain_from_mbx(
116 struct hclge_mbx_vf_to_pf_cmd
*req
,
117 struct hnae3_ring_chain_node
*ring_chain
,
118 struct hclge_vport
*vport
)
120 struct hnae3_ring_chain_node
*cur_chain
, *new_chain
;
124 ring_num
= req
->msg
[2];
126 if (ring_num
> ((HCLGE_MBX_VF_MSG_DATA_NUM
-
127 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM
) /
128 HCLGE_MBX_RING_NODE_VARIABLE_NUM
))
131 hnae_set_bit(ring_chain
->flag
, HNAE3_RING_TYPE_B
, req
->msg
[3]);
132 ring_chain
->tqp_index
=
133 hclge_get_queue_id(vport
->nic
.kinfo
.tqp
[req
->msg
[4]]);
134 hnae_set_field(ring_chain
->int_gl_idx
, HCLGE_INT_GL_IDX_M
,
138 cur_chain
= ring_chain
;
140 for (i
= 1; i
< ring_num
; i
++) {
141 new_chain
= kzalloc(sizeof(*new_chain
), GFP_KERNEL
);
145 hnae_set_bit(new_chain
->flag
, HNAE3_RING_TYPE_B
,
146 req
->msg
[HCLGE_MBX_RING_NODE_VARIABLE_NUM
* i
+
147 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM
]);
149 new_chain
->tqp_index
=
150 hclge_get_queue_id(vport
->nic
.kinfo
.tqp
151 [req
->msg
[HCLGE_MBX_RING_NODE_VARIABLE_NUM
* i
+
152 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM
+ 1]]);
154 hnae_set_field(new_chain
->int_gl_idx
, HCLGE_INT_GL_IDX_M
,
156 req
->msg
[HCLGE_MBX_RING_NODE_VARIABLE_NUM
* i
+
157 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM
+ 2]);
159 cur_chain
->next
= new_chain
;
160 cur_chain
= new_chain
;
165 hclge_free_vector_ring_chain(ring_chain
);
169 static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport
*vport
, bool en
,
170 struct hclge_mbx_vf_to_pf_cmd
*req
)
172 struct hnae3_ring_chain_node ring_chain
;
173 int vector_id
= req
->msg
[1];
176 memset(&ring_chain
, 0, sizeof(ring_chain
));
177 ret
= hclge_get_ring_chain_from_mbx(req
, &ring_chain
, vport
);
181 ret
= hclge_bind_ring_with_vector(vport
, vector_id
, en
, &ring_chain
);
185 hclge_free_vector_ring_chain(&ring_chain
);
190 static int hclge_set_vf_promisc_mode(struct hclge_vport
*vport
,
191 struct hclge_mbx_vf_to_pf_cmd
*req
)
193 bool en
= req
->msg
[1] ? true : false;
194 struct hclge_promisc_param param
;
196 /* always enable broadcast promisc bit */
197 hclge_promisc_param_init(¶m
, en
, en
, true, vport
->vport_id
);
198 return hclge_cmd_set_promisc_mode(vport
->back
, ¶m
);
201 static int hclge_set_vf_uc_mac_addr(struct hclge_vport
*vport
,
202 struct hclge_mbx_vf_to_pf_cmd
*mbx_req
,
205 const u8
*mac_addr
= (const u8
*)(&mbx_req
->msg
[2]);
206 struct hclge_dev
*hdev
= vport
->back
;
209 if (mbx_req
->msg
[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY
) {
210 const u8
*old_addr
= (const u8
*)(&mbx_req
->msg
[8]);
212 hclge_rm_uc_addr_common(vport
, old_addr
);
213 status
= hclge_add_uc_addr_common(vport
, mac_addr
);
215 hclge_add_uc_addr_common(vport
, old_addr
);
216 } else if (mbx_req
->msg
[1] == HCLGE_MBX_MAC_VLAN_UC_ADD
) {
217 status
= hclge_add_uc_addr_common(vport
, mac_addr
);
218 } else if (mbx_req
->msg
[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE
) {
219 status
= hclge_rm_uc_addr_common(vport
, mac_addr
);
221 dev_err(&hdev
->pdev
->dev
,
222 "failed to set unicast mac addr, unknown subcode %d\n",
228 hclge_gen_resp_to_vf(vport
, mbx_req
, status
, NULL
, 0);
233 static int hclge_set_vf_mc_mac_addr(struct hclge_vport
*vport
,
234 struct hclge_mbx_vf_to_pf_cmd
*mbx_req
,
237 const u8
*mac_addr
= (const u8
*)(&mbx_req
->msg
[2]);
238 struct hclge_dev
*hdev
= vport
->back
;
241 if (mbx_req
->msg
[1] == HCLGE_MBX_MAC_VLAN_MC_ADD
) {
242 status
= hclge_add_mc_addr_common(vport
, mac_addr
);
243 } else if (mbx_req
->msg
[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE
) {
244 status
= hclge_rm_mc_addr_common(vport
, mac_addr
);
245 } else if (mbx_req
->msg
[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE
) {
246 u8 func_id
= vport
->vport_id
;
247 bool enable
= mbx_req
->msg
[2];
249 status
= hclge_cfg_func_mta_filter(hdev
, func_id
, enable
);
251 dev_err(&hdev
->pdev
->dev
,
252 "failed to set mcast mac addr, unknown subcode %d\n",
258 hclge_gen_resp_to_vf(vport
, mbx_req
, status
, NULL
, 0);
263 static int hclge_set_vf_vlan_cfg(struct hclge_vport
*vport
,
264 struct hclge_mbx_vf_to_pf_cmd
*mbx_req
,
267 struct hclge_dev
*hdev
= vport
->back
;
270 if (mbx_req
->msg
[1] == HCLGE_MBX_VLAN_FILTER
) {
274 is_kill
= !!mbx_req
->msg
[2];
275 memcpy(&vlan
, &mbx_req
->msg
[3], sizeof(vlan
));
276 memcpy(&proto
, &mbx_req
->msg
[5], sizeof(proto
));
277 status
= hclge_set_vf_vlan_common(hdev
, vport
->vport_id
,
283 status
= hclge_gen_resp_to_vf(vport
, mbx_req
, status
, NULL
, 0);
288 static int hclge_get_vf_tcinfo(struct hclge_vport
*vport
,
289 struct hclge_mbx_vf_to_pf_cmd
*mbx_req
,
292 struct hclge_dev
*hdev
= vport
->back
;
295 ret
= hclge_gen_resp_to_vf(vport
, mbx_req
, 0, &hdev
->hw_tc_map
,
301 static int hclge_get_vf_queue_info(struct hclge_vport
*vport
,
302 struct hclge_mbx_vf_to_pf_cmd
*mbx_req
,
305 #define HCLGE_TQPS_RSS_INFO_LEN 8
306 u8 resp_data
[HCLGE_TQPS_RSS_INFO_LEN
];
307 struct hclge_dev
*hdev
= vport
->back
;
309 /* get the queue related info */
310 memcpy(&resp_data
[0], &vport
->alloc_tqps
, sizeof(u16
));
311 memcpy(&resp_data
[2], &vport
->nic
.kinfo
.rss_size
, sizeof(u16
));
312 memcpy(&resp_data
[4], &hdev
->num_desc
, sizeof(u16
));
313 memcpy(&resp_data
[6], &hdev
->rx_buf_len
, sizeof(u16
));
315 return hclge_gen_resp_to_vf(vport
, mbx_req
, 0, resp_data
,
316 HCLGE_TQPS_RSS_INFO_LEN
);
319 static int hclge_get_link_info(struct hclge_vport
*vport
,
320 struct hclge_mbx_vf_to_pf_cmd
*mbx_req
)
322 struct hclge_dev
*hdev
= vport
->back
;
328 /* mac.link can only be 0 or 1 */
329 link_status
= (u16
)hdev
->hw
.mac
.link
;
330 duplex
= hdev
->hw
.mac
.duplex
;
331 memcpy(&msg_data
[0], &link_status
, sizeof(u16
));
332 memcpy(&msg_data
[2], &hdev
->hw
.mac
.speed
, sizeof(u32
));
333 memcpy(&msg_data
[6], &duplex
, sizeof(u16
));
334 dest_vfid
= mbx_req
->mbx_src_vfid
;
336 /* send this requested info to VF */
337 return hclge_send_mbx_msg(vport
, msg_data
, sizeof(msg_data
),
338 HCLGE_MBX_LINK_STAT_CHANGE
, dest_vfid
);
341 static void hclge_mbx_reset_vf_queue(struct hclge_vport
*vport
,
342 struct hclge_mbx_vf_to_pf_cmd
*mbx_req
)
346 memcpy(&queue_id
, &mbx_req
->msg
[2], sizeof(queue_id
));
348 hclge_reset_vf_queue(vport
, queue_id
);
350 /* send response msg to VF after queue reset complete*/
351 hclge_gen_resp_to_vf(vport
, mbx_req
, 0, NULL
, 0);
354 static void hclge_reset_vf(struct hclge_vport
*vport
,
355 struct hclge_mbx_vf_to_pf_cmd
*mbx_req
)
357 struct hclge_dev
*hdev
= vport
->back
;
360 dev_warn(&hdev
->pdev
->dev
, "PF received VF reset request from VF %d!",
361 mbx_req
->mbx_src_vfid
);
363 /* Acknowledge VF that PF is now about to assert the reset for the VF.
364 * On receiving this message VF will get into pending state and will
365 * start polling for the hardware reset completion status.
367 ret
= hclge_inform_reset_assert_to_vf(vport
);
369 dev_err(&hdev
->pdev
->dev
,
370 "PF fail(%d) to inform VF(%d)of reset, reset failed!\n",
371 ret
, vport
->vport_id
);
375 dev_warn(&hdev
->pdev
->dev
, "PF is now resetting VF %d.\n",
376 mbx_req
->mbx_src_vfid
);
377 /* reset this virtual function */
378 hclge_func_reset_cmd(hdev
, mbx_req
->mbx_src_vfid
);
381 void hclge_mbx_handler(struct hclge_dev
*hdev
)
383 struct hclge_cmq_ring
*crq
= &hdev
->hw
.cmq
.crq
;
384 struct hclge_mbx_vf_to_pf_cmd
*req
;
385 struct hclge_vport
*vport
;
386 struct hclge_desc
*desc
;
389 flag
= le16_to_cpu(crq
->desc
[crq
->next_to_use
].flag
);
390 /* handle all the mailbox requests in the queue */
391 while (hnae_get_bit(flag
, HCLGE_CMDQ_RX_OUTVLD_B
)) {
392 desc
= &crq
->desc
[crq
->next_to_use
];
393 req
= (struct hclge_mbx_vf_to_pf_cmd
*)desc
->data
;
395 vport
= &hdev
->vport
[req
->mbx_src_vfid
];
397 switch (req
->msg
[0]) {
398 case HCLGE_MBX_MAP_RING_TO_VECTOR
:
399 ret
= hclge_map_unmap_ring_to_vf_vector(vport
, true,
402 case HCLGE_MBX_UNMAP_RING_TO_VECTOR
:
403 ret
= hclge_map_unmap_ring_to_vf_vector(vport
, false,
406 case HCLGE_MBX_SET_PROMISC_MODE
:
407 ret
= hclge_set_vf_promisc_mode(vport
, req
);
409 dev_err(&hdev
->pdev
->dev
,
410 "PF fail(%d) to set VF promisc mode\n",
413 case HCLGE_MBX_SET_UNICAST
:
414 ret
= hclge_set_vf_uc_mac_addr(vport
, req
, true);
416 dev_err(&hdev
->pdev
->dev
,
417 "PF fail(%d) to set VF UC MAC Addr\n",
420 case HCLGE_MBX_SET_MULTICAST
:
421 ret
= hclge_set_vf_mc_mac_addr(vport
, req
, false);
423 dev_err(&hdev
->pdev
->dev
,
424 "PF fail(%d) to set VF MC MAC Addr\n",
427 case HCLGE_MBX_SET_VLAN
:
428 ret
= hclge_set_vf_vlan_cfg(vport
, req
, false);
430 dev_err(&hdev
->pdev
->dev
,
431 "PF failed(%d) to config VF's VLAN\n",
434 case HCLGE_MBX_GET_QINFO
:
435 ret
= hclge_get_vf_queue_info(vport
, req
, true);
437 dev_err(&hdev
->pdev
->dev
,
438 "PF failed(%d) to get Q info for VF\n",
441 case HCLGE_MBX_GET_TCINFO
:
442 ret
= hclge_get_vf_tcinfo(vport
, req
, true);
444 dev_err(&hdev
->pdev
->dev
,
445 "PF failed(%d) to get TC info for VF\n",
448 case HCLGE_MBX_GET_LINK_STATUS
:
449 ret
= hclge_get_link_info(vport
, req
);
451 dev_err(&hdev
->pdev
->dev
,
452 "PF fail(%d) to get link stat for VF\n",
455 case HCLGE_MBX_QUEUE_RESET
:
456 hclge_mbx_reset_vf_queue(vport
, req
);
458 case HCLGE_MBX_RESET
:
459 hclge_reset_vf(vport
, req
);
462 dev_err(&hdev
->pdev
->dev
,
463 "un-supported mailbox message, code = %d\n",
467 crq
->desc
[crq
->next_to_use
].flag
= 0;
468 hclge_mbx_ring_ptr_move_crq(crq
);
469 flag
= le16_to_cpu(crq
->desc
[crq
->next_to_use
].flag
);
472 /* Write back CMDQ_RQ header pointer, M7 need this pointer */
473 hclge_write_dev(&hdev
->hw
, HCLGE_NIC_CRQ_HEAD_REG
, crq
->next_to_use
);