/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "nicvf_plat.h"

#define NICVF_MBOX_PF_RESPONSE_DELAY_US	(1000)

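/* Human-readable message names, used only by the mailbox logging below */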
static const char *mbox_message[NIC_MBOX_MSG_MAX] = {
	[NIC_MBOX_MSG_INVALID]            = "NIC_MBOX_MSG_INVALID",
	[NIC_MBOX_MSG_READY]              = "NIC_MBOX_MSG_READY",
	[NIC_MBOX_MSG_ACK]                = "NIC_MBOX_MSG_ACK",
	[NIC_MBOX_MSG_NACK]               = "NIC_MBOX_MSG_NACK",
	[NIC_MBOX_MSG_QS_CFG]             = "NIC_MBOX_MSG_QS_CFG",
	[NIC_MBOX_MSG_RQ_CFG]             = "NIC_MBOX_MSG_RQ_CFG",
	[NIC_MBOX_MSG_SQ_CFG]             = "NIC_MBOX_MSG_SQ_CFG",
	[NIC_MBOX_MSG_RQ_DROP_CFG]        = "NIC_MBOX_MSG_RQ_DROP_CFG",
	[NIC_MBOX_MSG_SET_MAC]            = "NIC_MBOX_MSG_SET_MAC",
	[NIC_MBOX_MSG_SET_MAX_FRS]        = "NIC_MBOX_MSG_SET_MAX_FRS",
	[NIC_MBOX_MSG_CPI_CFG]            = "NIC_MBOX_MSG_CPI_CFG",
	[NIC_MBOX_MSG_RSS_SIZE]           = "NIC_MBOX_MSG_RSS_SIZE",
	[NIC_MBOX_MSG_RSS_CFG]            = "NIC_MBOX_MSG_RSS_CFG",
	[NIC_MBOX_MSG_RSS_CFG_CONT]       = "NIC_MBOX_MSG_RSS_CFG_CONT",
	[NIC_MBOX_MSG_RQ_BP_CFG]          = "NIC_MBOX_MSG_RQ_BP_CFG",
	[NIC_MBOX_MSG_RQ_SW_SYNC]         = "NIC_MBOX_MSG_RQ_SW_SYNC",
	[NIC_MBOX_MSG_BGX_LINK_CHANGE]    = "NIC_MBOX_MSG_BGX_LINK_CHANGE",
	[NIC_MBOX_MSG_ALLOC_SQS]          = "NIC_MBOX_MSG_ALLOC_SQS",
	[NIC_MBOX_MSG_LOOPBACK]           = "NIC_MBOX_MSG_LOOPBACK",
	[NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
	[NIC_MBOX_MSG_CFG_DONE]           = "NIC_MBOX_MSG_CFG_DONE",
	[NIC_MBOX_MSG_SHUTDOWN]           = "NIC_MBOX_MSG_SHUTDOWN",
	[NIC_MBOX_MSG_RES_BIT]            = "NIC_MBOX_MSG_RES_BIT",
	/* RES_BIT responses are logged under the originating request's name */
	[NIC_MBOX_MSG_RSS_SIZE_RES_BIT]   = "NIC_MBOX_MSG_RSS_SIZE",
	[NIC_MBOX_MSG_ALLOC_SQS_RES_BIT]  = "NIC_MBOX_MSG_ALLOC_SQS",
};

static inline const char * __attribute__((unused))
nicvf_mbox_msg_str(int msg)
{
	assert(msg >= 0 && msg < NIC_MBOX_MSG_MAX);
	/* undefined messages */
	if (mbox_message[msg] == NULL)
		msg = 0;
	return mbox_message[msg];
}

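/*
 * Write a message into the VF-to-PF mailbox: the nic_mbx union is copied
 * out as NIC_PF_VF_MAILBOX_SIZE 64-bit register writes starting at
 * NIC_VF_PF_MAILBOX_0_1. No completion handshake happens here; callers
 * that need one go through nicvf_mbox_send_msg_to_pf().
 */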
static inline void
nicvf_mbox_send_msg_to_pf_raw(struct nicvf *nic, struct nic_mbx *mbx)
{
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (uint64_t *)mbx;
	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		nicvf_reg_write(nic, mbx_addr, *mbx_data);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}
	nicvf_mbox_log("msg sent %s (VF%d)",
		       nicvf_mbox_msg_str(mbx->msg.msg), nic->vf_id);
}

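/*
 * Fire-and-forget variant: post the message and pause for the PF's
 * nominal response delay instead of polling for an ACK.
 */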
static inline void
nicvf_mbox_send_async_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
{
	nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
	/* Messages without an ACK are racy! */
	nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
}

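/*
 * Synchronous send: post the message, then poll nic->pf_acked/pf_nacked
 * (set from the mailbox interrupt path) until the PF responds, retrying
 * the whole exchange up to five times. Returns 0 on ACK, -EINVAL on
 * NACK and -EBUSY if the PF never answers.
 */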
static inline int
nicvf_mbox_send_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
{
	long timeout;
	long sleep = 10;
	int i, retry = 5;

	for (i = 0; i < retry; i++) {
		nic->pf_acked = false;
		nic->pf_nacked = false;
		nicvf_smp_wmb();

		nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
		/* Give the PF some time to respond */
		nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
		timeout = NIC_MBOX_MSG_TIMEOUT;
		while (timeout > 0) {
			/* Periodic poll happens from nicvf_interrupt() */
			nicvf_smp_rmb();

			if (nic->pf_nacked)
				return -EINVAL;
			if (nic->pf_acked)
				return 0;

			nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
			timeout -= sleep;
		}
		nicvf_log_error("PF didn't ack msg 0x%02x %s VF%d (%d/%d)",
				mbx->msg.msg, nicvf_mbox_msg_str(mbx->msg.msg),
				nic->vf_id, i, retry);
	}
	return -EBUSY;
}

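/*
 * Mailbox interrupt handler: read the pending message out of the mailbox
 * registers, clear it, apply any payload (VF id, link state, RSS size,
 * secondary Qset allocation) and flag pf_acked/pf_nacked for a waiting
 * nicvf_mbox_send_msg_to_pf(). Returns the message id that was handled.
 */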
int
nicvf_handle_mbx_intr(struct nicvf *nic)
{
	struct nic_mbx mbx;
	uint64_t *mbx_data = (uint64_t *)&mbx;
	uint64_t mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	size_t i;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	/* Overwrite the message so we won't receive it again */
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1, 0x0);

	nicvf_mbox_log("msg received id=0x%hhx %s (VF%d)", mbx.msg.msg,
		       nicvf_mbox_msg_str(mbx.msg.msg), nic->vf_id);

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		ether_addr_copy((struct ether_addr *)mbx.nic_cfg.mac_addr,
				(struct ether_addr *)nic->mac_addr);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE_RES_BIT:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_ALLOC_SQS_RES_BIT:
		assert_primary(nic);
		if (mbx.sqs_alloc.qs_count != nic->sqs_count) {
			nicvf_log_error("Received %" PRIu8 "/%" PRIu8
					" secondary qsets",
					mbx.sqs_alloc.qs_count,
					nic->sqs_count);
			abort();
		}
		for (i = 0; i < mbx.sqs_alloc.qs_count; i++) {
			if (mbx.sqs_alloc.svf[i] != nic->snicvf[i]->vf_id) {
				nicvf_log_error("Received secondary qset[%zu] "
						"ID %" PRIu8 " expected %"
						PRIu8, i, mbx.sqs_alloc.svf[i],
						nic->snicvf[i]->vf_id);
				abort();
			}
		}
		nic->pf_acked = true;
		break;
	default:
		nicvf_log_error("Invalid message from PF, msg_id=0x%hhx %s",
				mbx.msg.msg, nicvf_mbox_msg_str(mbx.msg.msg));
		break;
	}
	nicvf_smp_wmb();

	return mbx.msg.msg;
}

/*
 * Check whether the VF is able to communicate with the PF and get the
 * VNIC number this VF is associated with.
 */
int
nicvf_mbox_check_pf_ready(struct nicvf *nic)
{
	struct nic_mbx mbx = { .msg = { .msg = NIC_MBOX_MSG_READY } };

	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_set_mac_addr(struct nicvf *nic,
			const uint8_t mac[NICVF_MAC_ADDR_SIZE])
{
	struct nic_mbx mbx = { .msg = { 0 } };
	int i;

	mbx.msg.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	for (i = 0; i < NICVF_MAC_ADDR_SIZE; i++)
		mbx.mac.mac_addr[i] = mac[i];

	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	mbx.msg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = qcnt;

	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_get_rss_size(struct nicvf *nic)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	mbx.msg.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;

	/* Result will be stored in nic->rss_info.rss_size */
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

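/*
 * Program the RSS indirection table on the PF. The table has
 * rss->rss_size entries and is pushed in chunks of
 * RSS_IND_TBL_LEN_PER_MBX_MSG entries: the first chunk goes as
 * NIC_MBOX_MSG_RSS_CFG, every following one as NIC_MBOX_MSG_RSS_CFG_CONT,
 * each carrying its offset into the table.
 */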
int
nicvf_mbox_config_rss(struct nicvf *nic)
{
	struct nic_mbx mbx = { .msg = { 0 } };
	struct nicvf_rss_reta_info *rss = &nic->rss_info;
	size_t tot_len = rss->rss_size;
	size_t cur_len;
	size_t cur_idx = 0;
	size_t i;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	mbx.rss_cfg.tbl_len = 0;
	mbx.rss_cfg.tbl_offset = 0;

	while (cur_idx < tot_len) {
		cur_len = nicvf_min(tot_len - cur_idx,
				    (size_t)RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.msg.msg = (cur_idx > 0) ?
			NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
		mbx.rss_cfg.tbl_offset = cur_idx;
		mbx.rss_cfg.tbl_len = cur_len;
		for (i = 0; i < cur_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[cur_idx++];

		if (nicvf_mbox_send_msg_to_pf(nic, &mbx))
			return NICVF_ERR_RSS_TBL_UPDATE;
	}

	return 0;
}

int
nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
		     struct pf_rq_cfg *pf_rq_cfg)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	mbx.msg.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = nic->vf_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = pf_rq_cfg->value;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	mbx.msg.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = nic->vf_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	/* cfg selects the CQ for this SQ's completions: (cq_qs << 3) | cq_idx */
	mbx.sq.cfg = (nic->vf_id << 3) | qidx;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg)
{
	struct nic_mbx mbx = { .msg = { 0 } };

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	qs_cfg->be = 1;
#endif
	/* Send a mailbox msg to the PF to configure the Qset */
	mbx.msg.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = nic->vf_id;
	mbx.qs.sqs_count = nic->sqs_count;
	mbx.qs.cfg = qs_cfg->value;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

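/*
 * Ask the PF to attach nic->sqs_count secondary Qsets to this primary VF.
 * The PF answers with NIC_MBOX_MSG_ALLOC_SQS_RES_BIT, which is validated
 * against the requested VF ids in nicvf_handle_mbx_intr().
 */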
int
nicvf_mbox_request_sqs(struct nicvf *nic)
{
	struct nic_mbx mbx = { .msg = { 0 } };
	size_t i;

	assert_primary(nic);
	assert(nic->sqs_count > 0);
	assert(nic->sqs_count <= MAX_SQS_PER_VF);

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.spec = 1;
	mbx.sqs_alloc.qs_count = nic->sqs_count;

	/* Pass the VF id of each secondary Qset being requested */
	for (i = 0; i < nic->sqs_count; i++)
		mbx.sqs_alloc.svf[i] = nic->snicvf[i]->vf_id;

	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

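/*
 * Enable or disable early-drop (RED) thresholds on the RQ's completion
 * queue, so a flooded receive path cannot consume the CQEs needed for
 * transmit completions.
 */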
int
nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable)
{
	struct nic_mbx mbx = { .msg = { 0 } };
	struct pf_rq_drop_cfg *drop_cfg;

	/* Enable CQ drop to reserve sufficient CQEs for all tx packets */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.qs_num = nic->vf_id;
	mbx.rq.rq_num = qidx;
	drop_cfg = (struct pf_rq_drop_cfg *)&mbx.rq.cfg;
	drop_cfg->value = 0;
	if (enable) {
		drop_cfg->cq_red = 1;
		drop_cfg->cq_drop = 2;
	}
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

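/*
 * Update the maximum receive frame size for this VF. Despite the
 * parameter name, callers are expected to pass the full frame size in
 * bytes (MTU plus L2 overhead); an assumption based on how the nicvf
 * PMD computes the value, not something enforced here.
 */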
int
nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	mbx.msg.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_rq_sync(struct nicvf *nic)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	/* Make sure all packets in the pipeline are written back to memory */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	mbx.rq.cfg = 0;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	mbx.msg.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.qs_num = nic->vf_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = 0;
	/*
	 * Bit 63 enables CQ backpressure, bit 62 RBDR backpressure, and the
	 * low bits carry the backpressure id (layout assumed from the
	 * RQ_BP_CFG handling in the Linux nicvf driver).
	 */
	if (enable)
		mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (nic->vf_id << 0);
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

int
nicvf_mbox_loopback_config(struct nicvf *nic, bool enable)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = enable;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

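/*
 * Ask the PF to clear the selected hardware statistics; each bit in the
 * rx/tx/rq/sq masks selects one counter of that group to reset.
 */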
int
nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask,
			       uint8_t tx_stat_mask, uint16_t rq_stat_mask,
			       uint16_t sq_stat_mask)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rx_stat_mask = rx_stat_mask;
	mbx.reset_stat.tx_stat_mask = tx_stat_mask;
	mbx.reset_stat.rq_stat_mask = rq_stat_mask;
	mbx.reset_stat.sq_stat_mask = sq_stat_mask;
	return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

void
nicvf_mbox_shutdown(struct nicvf *nic)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_mbox_send_msg_to_pf(nic, &mbx);
}

void
nicvf_mbox_cfg_done(struct nicvf *nic)
{
	struct nic_mbx mbx = { .msg = { 0 } };

	/* The PF sends no ACK for CFG_DONE, hence the async send */
	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
	nicvf_mbox_send_async_msg_to_pf(nic, &mbx);
}
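
/*
 * Typical call order, as a sketch only; the authoritative sequence lives
 * in the nicvf PMD (nicvf_ethdev.c), and the qs_cfg/rq_cfg/frame_size
 * values below are placeholders:
 *
 *	nicvf_mbox_check_pf_ready(nic);           probe: handshake, learn vf_id
 *	nicvf_mbox_qset_config(nic, &qs_cfg);     dev_start: enable the Qset
 *	nicvf_mbox_rq_config(nic, qidx, &rq_cfg);
 *	nicvf_mbox_sq_config(nic, qidx);
 *	nicvf_mbox_config_cpi(nic, rx_queue_cnt);
 *	nicvf_mbox_get_rss_size(nic);
 *	nicvf_mbox_config_rss(nic);
 *	nicvf_mbox_update_hw_max_frs(nic, frame_size);
 *	nicvf_mbox_cfg_done(nic);                 tell the PF config is complete
 *	...
 *	nicvf_mbox_shutdown(nic);                 dev_close: detach from the PF
 */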