/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include "nicvf_plat.h"

#define NICVF_MBOX_PF_RESPONSE_DELAY_US (1000)
14 static const char *mbox_message
[NIC_MBOX_MSG_MAX
] = {
15 [NIC_MBOX_MSG_INVALID
] = "NIC_MBOX_MSG_INVALID",
16 [NIC_MBOX_MSG_READY
] = "NIC_MBOX_MSG_READY",
17 [NIC_MBOX_MSG_ACK
] = "NIC_MBOX_MSG_ACK",
18 [NIC_MBOX_MSG_NACK
] = "NIC_MBOX_MSG_ACK",
19 [NIC_MBOX_MSG_QS_CFG
] = "NIC_MBOX_MSG_QS_CFG",
20 [NIC_MBOX_MSG_RQ_CFG
] = "NIC_MBOX_MSG_RQ_CFG",
21 [NIC_MBOX_MSG_SQ_CFG
] = "NIC_MBOX_MSG_SQ_CFG",
22 [NIC_MBOX_MSG_RQ_DROP_CFG
] = "NIC_MBOX_MSG_RQ_DROP_CFG",
23 [NIC_MBOX_MSG_SET_MAC
] = "NIC_MBOX_MSG_SET_MAC",
24 [NIC_MBOX_MSG_SET_MAX_FRS
] = "NIC_MBOX_MSG_SET_MAX_FRS",
25 [NIC_MBOX_MSG_CPI_CFG
] = "NIC_MBOX_MSG_CPI_CFG",
26 [NIC_MBOX_MSG_RSS_SIZE
] = "NIC_MBOX_MSG_RSS_SIZE",
27 [NIC_MBOX_MSG_RSS_CFG
] = "NIC_MBOX_MSG_RSS_CFG",
28 [NIC_MBOX_MSG_RSS_CFG_CONT
] = "NIC_MBOX_MSG_RSS_CFG_CONT",
29 [NIC_MBOX_MSG_RQ_BP_CFG
] = "NIC_MBOX_MSG_RQ_BP_CFG",
30 [NIC_MBOX_MSG_RQ_SW_SYNC
] = "NIC_MBOX_MSG_RQ_SW_SYNC",
31 [NIC_MBOX_MSG_BGX_LINK_CHANGE
] = "NIC_MBOX_MSG_BGX_LINK_CHANGE",
32 [NIC_MBOX_MSG_ALLOC_SQS
] = "NIC_MBOX_MSG_ALLOC_SQS",
33 [NIC_MBOX_MSG_LOOPBACK
] = "NIC_MBOX_MSG_LOOPBACK",
34 [NIC_MBOX_MSG_RESET_STAT_COUNTER
] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
35 [NIC_MBOX_MSG_CFG_DONE
] = "NIC_MBOX_MSG_CFG_DONE",
36 [NIC_MBOX_MSG_SHUTDOWN
] = "NIC_MBOX_MSG_SHUTDOWN",
39 static inline const char * __attribute__((unused
))
40 nicvf_mbox_msg_str(int msg
)
42 assert(msg
>= 0 && msg
< NIC_MBOX_MSG_MAX
);
43 /* undefined messages */
44 if (mbox_message
[msg
] == NULL
)
46 return mbox_message
[msg
];
50 nicvf_mbox_send_msg_to_pf_raw(struct nicvf
*nic
, struct nic_mbx
*mbx
)
56 mbx_addr
= NIC_VF_PF_MAILBOX_0_1
;
57 mbx_data
= (uint64_t *)mbx
;
58 for (i
= 0; i
< NIC_PF_VF_MAILBOX_SIZE
; i
++) {
59 nicvf_reg_write(nic
, mbx_addr
, *mbx_data
);
61 mbx_addr
+= sizeof(uint64_t);
63 nicvf_mbox_log("msg sent %s (VF%d)",
64 nicvf_mbox_msg_str(mbx
->msg
.msg
), nic
->vf_id
);
68 nicvf_mbox_send_async_msg_to_pf(struct nicvf
*nic
, struct nic_mbx
*mbx
)
70 nicvf_mbox_send_msg_to_pf_raw(nic
, mbx
);
71 /* Messages without ack are racy!*/
72 nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US
);
76 nicvf_mbox_send_msg_to_pf(struct nicvf
*nic
, struct nic_mbx
*mbx
)
82 for (i
= 0; i
< retry
; i
++) {
83 nic
->pf_acked
= false;
84 nic
->pf_nacked
= false;
87 nicvf_mbox_send_msg_to_pf_raw(nic
, mbx
);
88 /* Give some time to get PF response */
89 nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US
);
90 timeout
= NIC_MBOX_MSG_TIMEOUT
;
92 /* Periodic poll happens from nicvf_interrupt() */
100 nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US
);
103 nicvf_log_error("PF didn't ack to msg 0x%02x %s VF%d (%d/%d)",
104 mbx
->msg
.msg
, nicvf_mbox_msg_str(mbx
->msg
.msg
),
105 nic
->vf_id
, i
, retry
);
112 nicvf_handle_mbx_intr(struct nicvf
*nic
)
115 uint64_t *mbx_data
= (uint64_t *)&mbx
;
116 uint64_t mbx_addr
= NIC_VF_PF_MAILBOX_0_1
;
119 for (i
= 0; i
< NIC_PF_VF_MAILBOX_SIZE
; i
++) {
120 *mbx_data
= nicvf_reg_read(nic
, mbx_addr
);
122 mbx_addr
+= sizeof(uint64_t);
125 /* Overwrite the message so we won't receive it again */
126 nicvf_reg_write(nic
, NIC_VF_PF_MAILBOX_0_1
, 0x0);
128 nicvf_mbox_log("msg received id=0x%hhx %s (VF%d)", mbx
.msg
.msg
,
129 nicvf_mbox_msg_str(mbx
.msg
.msg
), nic
->vf_id
);
131 switch (mbx
.msg
.msg
) {
132 case NIC_MBOX_MSG_READY
:
133 nic
->vf_id
= mbx
.nic_cfg
.vf_id
& 0x7F;
134 nic
->tns_mode
= mbx
.nic_cfg
.tns_mode
& 0x7F;
135 nic
->node
= mbx
.nic_cfg
.node_id
;
136 nic
->sqs_mode
= mbx
.nic_cfg
.sqs_mode
;
137 nic
->loopback_supported
= mbx
.nic_cfg
.loopback_supported
;
138 ether_addr_copy((struct ether_addr
*)mbx
.nic_cfg
.mac_addr
,
139 (struct ether_addr
*)nic
->mac_addr
);
140 nic
->pf_acked
= true;
142 case NIC_MBOX_MSG_ACK
:
143 nic
->pf_acked
= true;
145 case NIC_MBOX_MSG_NACK
:
146 nic
->pf_nacked
= true;
148 case NIC_MBOX_MSG_RSS_SIZE
:
149 nic
->rss_info
.rss_size
= mbx
.rss_size
.ind_tbl_size
;
150 nic
->pf_acked
= true;
152 case NIC_MBOX_MSG_BGX_LINK_CHANGE
:
153 nic
->link_up
= mbx
.link_status
.link_up
;
154 nic
->duplex
= mbx
.link_status
.duplex
;
155 nic
->speed
= mbx
.link_status
.speed
;
156 nic
->pf_acked
= true;
158 case NIC_MBOX_MSG_ALLOC_SQS
:
160 if (mbx
.sqs_alloc
.qs_count
!= nic
->sqs_count
) {
161 nicvf_log_error("Received %" PRIu8
"/%" PRIu8
163 mbx
.sqs_alloc
.qs_count
,
167 for (i
= 0; i
< mbx
.sqs_alloc
.qs_count
; i
++) {
168 if (mbx
.sqs_alloc
.svf
[i
] != nic
->snicvf
[i
]->vf_id
) {
169 nicvf_log_error("Received secondary qset[%zu] "
170 "ID %" PRIu8
" expected %"
171 PRIu8
, i
, mbx
.sqs_alloc
.svf
[i
],
172 nic
->snicvf
[i
]->vf_id
);
176 nic
->pf_acked
= true;
179 nicvf_log_error("Invalid message from PF, msg_id=0x%hhx %s",
180 mbx
.msg
.msg
, nicvf_mbox_msg_str(mbx
.msg
.msg
));
189 * Checks if VF is able to communicate with PF
190 * and also gets the VNIC number this VF is associated to.
193 nicvf_mbox_check_pf_ready(struct nicvf
*nic
)
195 struct nic_mbx mbx
= { .msg
= {.msg
= NIC_MBOX_MSG_READY
} };
197 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
201 nicvf_mbox_set_mac_addr(struct nicvf
*nic
,
202 const uint8_t mac
[NICVF_MAC_ADDR_SIZE
])
204 struct nic_mbx mbx
= { .msg
= {0} };
207 mbx
.msg
.msg
= NIC_MBOX_MSG_SET_MAC
;
208 mbx
.mac
.vf_id
= nic
->vf_id
;
209 for (i
= 0; i
< 6; i
++)
210 mbx
.mac
.mac_addr
[i
] = mac
[i
];
212 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
216 nicvf_mbox_config_cpi(struct nicvf
*nic
, uint32_t qcnt
)
218 struct nic_mbx mbx
= { .msg
= { 0 } };
220 mbx
.msg
.msg
= NIC_MBOX_MSG_CPI_CFG
;
221 mbx
.cpi_cfg
.vf_id
= nic
->vf_id
;
222 mbx
.cpi_cfg
.cpi_alg
= nic
->cpi_alg
;
223 mbx
.cpi_cfg
.rq_cnt
= qcnt
;
225 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
229 nicvf_mbox_get_rss_size(struct nicvf
*nic
)
231 struct nic_mbx mbx
= { .msg
= { 0 } };
233 mbx
.msg
.msg
= NIC_MBOX_MSG_RSS_SIZE
;
234 mbx
.rss_size
.vf_id
= nic
->vf_id
;
236 /* Result will be stored in nic->rss_info.rss_size */
237 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
241 nicvf_mbox_config_rss(struct nicvf
*nic
)
243 struct nic_mbx mbx
= { .msg
= { 0 } };
244 struct nicvf_rss_reta_info
*rss
= &nic
->rss_info
;
245 size_t tot_len
= rss
->rss_size
;
250 mbx
.rss_cfg
.vf_id
= nic
->vf_id
;
251 mbx
.rss_cfg
.hash_bits
= rss
->hash_bits
;
252 mbx
.rss_cfg
.tbl_len
= 0;
253 mbx
.rss_cfg
.tbl_offset
= 0;
255 while (cur_idx
< tot_len
) {
256 cur_len
= nicvf_min(tot_len
- cur_idx
,
257 (size_t)RSS_IND_TBL_LEN_PER_MBX_MSG
);
258 mbx
.msg
.msg
= (cur_idx
> 0) ?
259 NIC_MBOX_MSG_RSS_CFG_CONT
: NIC_MBOX_MSG_RSS_CFG
;
260 mbx
.rss_cfg
.tbl_offset
= cur_idx
;
261 mbx
.rss_cfg
.tbl_len
= cur_len
;
262 for (i
= 0; i
< cur_len
; i
++)
263 mbx
.rss_cfg
.ind_tbl
[i
] = rss
->ind_tbl
[cur_idx
++];
265 if (nicvf_mbox_send_msg_to_pf(nic
, &mbx
))
266 return NICVF_ERR_RSS_TBL_UPDATE
;
273 nicvf_mbox_rq_config(struct nicvf
*nic
, uint16_t qidx
,
274 struct pf_rq_cfg
*pf_rq_cfg
)
276 struct nic_mbx mbx
= { .msg
= { 0 } };
278 mbx
.msg
.msg
= NIC_MBOX_MSG_RQ_CFG
;
279 mbx
.rq
.qs_num
= nic
->vf_id
;
280 mbx
.rq
.rq_num
= qidx
;
281 mbx
.rq
.cfg
= pf_rq_cfg
->value
;
282 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
286 nicvf_mbox_sq_config(struct nicvf
*nic
, uint16_t qidx
)
288 struct nic_mbx mbx
= { .msg
= { 0 } };
290 mbx
.msg
.msg
= NIC_MBOX_MSG_SQ_CFG
;
291 mbx
.sq
.qs_num
= nic
->vf_id
;
292 mbx
.sq
.sq_num
= qidx
;
293 mbx
.sq
.sqs_mode
= nic
->sqs_mode
;
294 mbx
.sq
.cfg
= (nic
->vf_id
<< 3) | qidx
;
295 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
299 nicvf_mbox_qset_config(struct nicvf
*nic
, struct pf_qs_cfg
*qs_cfg
)
301 struct nic_mbx mbx
= { .msg
= { 0 } };
303 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
306 /* Send a mailbox msg to PF to config Qset */
307 mbx
.msg
.msg
= NIC_MBOX_MSG_QS_CFG
;
308 mbx
.qs
.num
= nic
->vf_id
;
309 mbx
.qs
.sqs_count
= nic
->sqs_count
;
310 mbx
.qs
.cfg
= qs_cfg
->value
;
311 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
315 nicvf_mbox_request_sqs(struct nicvf
*nic
)
317 struct nic_mbx mbx
= { .msg
= { 0 } };
321 assert(nic
->sqs_count
> 0);
322 assert(nic
->sqs_count
<= MAX_SQS_PER_VF
);
324 mbx
.sqs_alloc
.msg
= NIC_MBOX_MSG_ALLOC_SQS
;
325 mbx
.sqs_alloc
.spec
= 1;
326 mbx
.sqs_alloc
.qs_count
= nic
->sqs_count
;
328 /* Set no of Rx/Tx queues in each of the SQsets */
329 for (i
= 0; i
< nic
->sqs_count
; i
++)
330 mbx
.sqs_alloc
.svf
[i
] = nic
->snicvf
[i
]->vf_id
;
332 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
336 nicvf_mbox_rq_drop_config(struct nicvf
*nic
, uint16_t qidx
, bool enable
)
338 struct nic_mbx mbx
= { .msg
= { 0 } };
339 struct pf_rq_drop_cfg
*drop_cfg
;
341 /* Enable CQ drop to reserve sufficient CQEs for all tx packets */
342 mbx
.msg
.msg
= NIC_MBOX_MSG_RQ_DROP_CFG
;
343 mbx
.rq
.qs_num
= nic
->vf_id
;
344 mbx
.rq
.rq_num
= qidx
;
345 drop_cfg
= (struct pf_rq_drop_cfg
*)&mbx
.rq
.cfg
;
348 drop_cfg
->cq_red
= 1;
349 drop_cfg
->cq_drop
= 2;
351 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
355 nicvf_mbox_update_hw_max_frs(struct nicvf
*nic
, uint16_t mtu
)
357 struct nic_mbx mbx
= { .msg
= { 0 } };
359 mbx
.msg
.msg
= NIC_MBOX_MSG_SET_MAX_FRS
;
360 mbx
.frs
.max_frs
= mtu
;
361 mbx
.frs
.vf_id
= nic
->vf_id
;
362 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
366 nicvf_mbox_rq_sync(struct nicvf
*nic
)
368 struct nic_mbx mbx
= { .msg
= { 0 } };
370 /* Make sure all packets in the pipeline are written back into mem */
371 mbx
.msg
.msg
= NIC_MBOX_MSG_RQ_SW_SYNC
;
373 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
377 nicvf_mbox_rq_bp_config(struct nicvf
*nic
, uint16_t qidx
, bool enable
)
379 struct nic_mbx mbx
= { .msg
= { 0 } };
381 mbx
.msg
.msg
= NIC_MBOX_MSG_RQ_BP_CFG
;
382 mbx
.rq
.qs_num
= nic
->vf_id
;
383 mbx
.rq
.rq_num
= qidx
;
386 mbx
.rq
.cfg
= (1ULL << 63) | (1ULL << 62) | (nic
->vf_id
<< 0);
387 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
391 nicvf_mbox_loopback_config(struct nicvf
*nic
, bool enable
)
393 struct nic_mbx mbx
= { .msg
= { 0 } };
395 mbx
.lbk
.msg
= NIC_MBOX_MSG_LOOPBACK
;
396 mbx
.lbk
.vf_id
= nic
->vf_id
;
397 mbx
.lbk
.enable
= enable
;
398 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
402 nicvf_mbox_reset_stat_counters(struct nicvf
*nic
, uint16_t rx_stat_mask
,
403 uint8_t tx_stat_mask
, uint16_t rq_stat_mask
,
404 uint16_t sq_stat_mask
)
406 struct nic_mbx mbx
= { .msg
= { 0 } };
408 mbx
.reset_stat
.msg
= NIC_MBOX_MSG_RESET_STAT_COUNTER
;
409 mbx
.reset_stat
.rx_stat_mask
= rx_stat_mask
;
410 mbx
.reset_stat
.tx_stat_mask
= tx_stat_mask
;
411 mbx
.reset_stat
.rq_stat_mask
= rq_stat_mask
;
412 mbx
.reset_stat
.sq_stat_mask
= sq_stat_mask
;
413 return nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
417 nicvf_mbox_shutdown(struct nicvf
*nic
)
419 struct nic_mbx mbx
= { .msg
= { 0 } };
421 mbx
.msg
.msg
= NIC_MBOX_MSG_SHUTDOWN
;
422 nicvf_mbox_send_msg_to_pf(nic
, &mbx
);
426 nicvf_mbox_cfg_done(struct nicvf
*nic
)
428 struct nic_mbx mbx
= { .msg
= { 0 } };
430 mbx
.msg
.msg
= NIC_MBOX_MSG_CFG_DONE
;
431 nicvf_mbox_send_async_msg_to_pf(nic
, &mbx
);