]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / spdk / dpdk / drivers / bus / dpaa / base / fman / fman_hw.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright 2017 NXP
4 *
5 */
6
7 #include <sys/types.h>
8 #include <sys/ioctl.h>
9 #include <ifaddrs.h>
10 #include <fman.h>
11 /* This header declares things about Fman hardware itself (the format of status
12 * words and an inline implementation of CRC64). We include it only in order to
13 * instantiate the one global variable it depends on.
14 */
15 #include <fsl_fman.h>
16 #include <fsl_fman_crc64.h>
17 #include <fsl_bman.h>
18
19 #define FMAN_SP_SG_DISABLE 0x80000000
20 #define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
21
22 /* Instantiate the global variable that the inline CRC64 implementation (in
23 * <fsl_fman.h>) depends on.
24 */
25 DECLARE_FMAN_CRC64_TABLE();
26
27 #define ETH_ADDR_TO_UINT64(eth_addr) \
28 (uint64_t)(((uint64_t)(eth_addr)[0] << 40) | \
29 ((uint64_t)(eth_addr)[1] << 32) | \
30 ((uint64_t)(eth_addr)[2] << 24) | \
31 ((uint64_t)(eth_addr)[3] << 16) | \
32 ((uint64_t)(eth_addr)[4] << 8) | \
33 ((uint64_t)(eth_addr)[5]))
34
35 void
36 fman_if_set_mcast_filter_table(struct fman_if *p)
37 {
38 struct __fman_if *__if = container_of(p, struct __fman_if, __if);
39 void *hashtable_ctrl;
40 uint32_t i;
41
42 hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
43 for (i = 0; i < 64; i++)
44 out_be32(hashtable_ctrl, i|HASH_CTRL_MCAST_EN);
45 }
46
47 void
48 fman_if_reset_mcast_filter_table(struct fman_if *p)
49 {
50 struct __fman_if *__if = container_of(p, struct __fman_if, __if);
51 void *hashtable_ctrl;
52 uint32_t i;
53
54 hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
55 for (i = 0; i < 64; i++)
56 out_be32(hashtable_ctrl, i & ~HASH_CTRL_MCAST_EN);
57 }
58
static
uint32_t get_mac_hash_code(uint64_t eth_addr)
{
	uint32_t hash = 0;
	uint8_t byte_idx;

	/* Fold each of the six MAC-address bytes into one hash bit:
	 * the parity of a byte's eight bits becomes output bit
	 * (5 - byte_idx), giving a 6-bit hash value.
	 */
	for (byte_idx = 0; byte_idx < 6; byte_idx++) {
		uint64_t parity = 0;
		uint8_t bit;

		for (bit = 0; bit < 8; bit++) {
			parity ^= eth_addr & (uint64_t)0x01;
			eth_addr >>= 1;
		}

		hash |= (uint32_t)(parity << (5 - byte_idx));
	}

	return hash;
}
81
82 int
83 fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth)
84 {
85 uint64_t eth_addr;
86 void *hashtable_ctrl;
87 uint32_t hash;
88
89 struct __fman_if *__if = container_of(p, struct __fman_if, __if);
90
91 eth_addr = ETH_ADDR_TO_UINT64(eth);
92
93 if (!(eth_addr & GROUP_ADDRESS))
94 return -1;
95
96 hash = get_mac_hash_code(eth_addr) & HASH_CTRL_ADDR_MASK;
97 hash = hash | HASH_CTRL_MCAST_EN;
98
99 hashtable_ctrl = &((struct memac_regs *)__if->ccsr_map)->hashtable_ctrl;
100 out_be32(hashtable_ctrl, hash);
101
102 return 0;
103 }
104
105 int
106 fman_if_get_primary_mac_addr(struct fman_if *p, uint8_t *eth)
107 {
108 struct __fman_if *__if = container_of(p, struct __fman_if, __if);
109 void *mac_reg =
110 &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_l;
111 u32 val = in_be32(mac_reg);
112
113 eth[0] = (val & 0x000000ff) >> 0;
114 eth[1] = (val & 0x0000ff00) >> 8;
115 eth[2] = (val & 0x00ff0000) >> 16;
116 eth[3] = (val & 0xff000000) >> 24;
117
118 mac_reg = &((struct memac_regs *)__if->ccsr_map)->mac_addr0.mac_addr_u;
119 val = in_be32(mac_reg);
120
121 eth[4] = (val & 0x000000ff) >> 0;
122 eth[5] = (val & 0x0000ff00) >> 8;
123
124 return 0;
125 }
126
127 void
128 fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num)
129 {
130 struct __fman_if *m = container_of(p, struct __fman_if, __if);
131 void *reg;
132
133 if (addr_num) {
134 reg = &((struct memac_regs *)m->ccsr_map)->
135 mac_addr[addr_num-1].mac_addr_l;
136 out_be32(reg, 0x0);
137 reg = &((struct memac_regs *)m->ccsr_map)->
138 mac_addr[addr_num-1].mac_addr_u;
139 out_be32(reg, 0x0);
140 } else {
141 reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
142 out_be32(reg, 0x0);
143 reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
144 out_be32(reg, 0x0);
145 }
146 }
147
148 int
149 fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num)
150 {
151 struct __fman_if *m = container_of(p, struct __fman_if, __if);
152
153 void *reg;
154 u32 val;
155
156 memcpy(&m->__if.mac_addr, eth, ETHER_ADDR_LEN);
157
158 if (addr_num)
159 reg = &((struct memac_regs *)m->ccsr_map)->
160 mac_addr[addr_num-1].mac_addr_l;
161 else
162 reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_l;
163
164 val = (m->__if.mac_addr.addr_bytes[0] |
165 (m->__if.mac_addr.addr_bytes[1] << 8) |
166 (m->__if.mac_addr.addr_bytes[2] << 16) |
167 (m->__if.mac_addr.addr_bytes[3] << 24));
168 out_be32(reg, val);
169
170 if (addr_num)
171 reg = &((struct memac_regs *)m->ccsr_map)->
172 mac_addr[addr_num-1].mac_addr_u;
173 else
174 reg = &((struct memac_regs *)m->ccsr_map)->mac_addr0.mac_addr_u;
175
176 val = ((m->__if.mac_addr.addr_bytes[4] << 0) |
177 (m->__if.mac_addr.addr_bytes[5] << 8));
178 out_be32(reg, val);
179
180 return 0;
181 }
182
183 void
184 fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable)
185 {
186 struct __fman_if *__if = container_of(p, struct __fman_if, __if);
187 u32 value = 0;
188 void *cmdcfg;
189
190 assert(fman_ccsr_map_fd != -1);
191
192 /* Set Rx Ignore Pause Frames */
193 cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
194 if (enable)
195 value = in_be32(cmdcfg) | CMD_CFG_PAUSE_IGNORE;
196 else
197 value = in_be32(cmdcfg) & ~CMD_CFG_PAUSE_IGNORE;
198
199 out_be32(cmdcfg, value);
200 }
201
202 void
203 fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len)
204 {
205 struct __fman_if *__if = container_of(p, struct __fman_if, __if);
206 unsigned int *maxfrm;
207
208 assert(fman_ccsr_map_fd != -1);
209
210 /* Set Max frame length */
211 maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
212 out_be32(maxfrm, (MAXFRM_RX_MASK & max_frame_len));
213 }
214
/* Read the basic MAC counters into @stats.
 *
 * Each hardware counter is 64 bits wide, split across an upper (_u)
 * and lower (_l) 32-bit register pair; the upper half is read first
 * and combined with the lower half.
 * NOTE(review): assumes a counter cannot carry between the two 32-bit
 * reads -- confirm against the mEMAC statistics documentation.
 */
void
fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats)
{
	struct __fman_if *m = container_of(p, struct __fman_if, __if);
	struct memac_regs *regs = m->ccsr_map;

	/* read recved packet count */
	stats->ipackets = ((u64)in_be32(&regs->rfrm_u)) << 32 |
			in_be32(&regs->rfrm_l);
	stats->ibytes = ((u64)in_be32(&regs->roct_u)) << 32 |
			in_be32(&regs->roct_l);
	stats->ierrors = ((u64)in_be32(&regs->rerr_u)) << 32 |
			in_be32(&regs->rerr_l);

	/* read xmited packet count */
	stats->opackets = ((u64)in_be32(&regs->tfrm_u)) << 32 |
			in_be32(&regs->tfrm_l);
	stats->obytes = ((u64)in_be32(&regs->toct_u)) << 32 |
			in_be32(&regs->toct_l);
	stats->oerrors = ((u64)in_be32(&regs->terr_u)) << 32 |
			in_be32(&regs->terr_l);
}
237
/* Read the first @n extended MAC counters into @value.
 *
 * The counters are laid out as consecutive 32-bit register pairs
 * starting at reoct_l: pair i has its lower word at base + 8*i and
 * its upper word at base + 8*i + 4; the two halves are combined into
 * one 64-bit value.
 * NOTE(review): @n is not bounds-checked against the number of
 * counters the hardware implements -- callers must pass a valid count.
 */
void
fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
{
	struct __fman_if *m = container_of(p, struct __fman_if, __if);
	struct memac_regs *regs = m->ccsr_map;
	int i;
	uint64_t base_offset = offsetof(struct memac_regs, reoct_l);

	for (i = 0; i < n; i++)
		value[i] = ((u64)in_be32((char *)regs
				+ base_offset + 8 * i + 4)) << 32 |
				((u64)in_be32((char *)regs
				+ base_offset + 8 * i));
}
252
253 void
254 fman_if_stats_reset(struct fman_if *p)
255 {
256 struct __fman_if *m = container_of(p, struct __fman_if, __if);
257 struct memac_regs *regs = m->ccsr_map;
258 uint32_t tmp;
259
260 tmp = in_be32(&regs->statn_config);
261
262 tmp |= STATS_CFG_CLR;
263
264 out_be32(&regs->statn_config, tmp);
265
266 while (in_be32(&regs->statn_config) & STATS_CFG_CLR)
267 ;
268 }
269
270 void
271 fman_if_promiscuous_enable(struct fman_if *p)
272 {
273 struct __fman_if *__if = container_of(p, struct __fman_if, __if);
274 void *cmdcfg;
275
276 assert(fman_ccsr_map_fd != -1);
277
278 /* Enable Rx promiscuous mode */
279 cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
280 out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_PROMIS_EN);
281 }
282
283 void
284 fman_if_promiscuous_disable(struct fman_if *p)
285 {
286 struct __fman_if *__if = container_of(p, struct __fman_if, __if);
287 void *cmdcfg;
288
289 assert(fman_ccsr_map_fd != -1);
290
291 /* Disable Rx promiscuous mode */
292 cmdcfg = &((struct memac_regs *)__if->ccsr_map)->command_config;
293 out_be32(cmdcfg, in_be32(cmdcfg) & (~CMD_CFG_PROMIS_EN));
294 }
295
/* Enable both the Rx and Tx paths of the MAC.
 *
 * NOTE(review): this pokes the register at ccsr_map + 8 directly
 * rather than through struct memac_regs; bits 0-1 appear to be the
 * Tx/Rx enables (the companion fman_if_disable_rx clears only bit 1)
 * -- confirm offset 8 is command_config in the memac register map.
 */
void
fman_if_enable_rx(struct fman_if *p)
{
	struct __fman_if *__if = container_of(p, struct __fman_if, __if);

	assert(fman_ccsr_map_fd != -1);

	/* enable Rx and Tx */
	out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) | 3);
}
306
/* Disable the Rx path only, leaving Tx running so queued frames can
 * still drain.
 *
 * NOTE(review): pokes the register at ccsr_map + 8 directly; bit 1 is
 * assumed to be the Rx enable (fman_if_enable_rx sets bits 0-1) --
 * confirm against the memac register map.
 */
void
fman_if_disable_rx(struct fman_if *p)
{
	struct __fman_if *__if = container_of(p, struct __fman_if, __if);

	assert(fman_ccsr_map_fd != -1);

	/* only disable Rx, not Tx */
	out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) & ~(u32)2);
}
317
318 void
319 fman_if_loopback_enable(struct fman_if *p)
320 {
321 struct __fman_if *__if = container_of(p, struct __fman_if, __if);
322
323 assert(fman_ccsr_map_fd != -1);
324
325 /* Enable loopback mode */
326 if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
327 unsigned int *ifmode =
328 &((struct memac_regs *)__if->ccsr_map)->if_mode;
329 out_be32(ifmode, in_be32(ifmode) | IF_MODE_RLP);
330 } else{
331 unsigned int *cmdcfg =
332 &((struct memac_regs *)__if->ccsr_map)->command_config;
333 out_be32(cmdcfg, in_be32(cmdcfg) | CMD_CFG_LOOPBACK_EN);
334 }
335 }
336
337 void
338 fman_if_loopback_disable(struct fman_if *p)
339 {
340 struct __fman_if *__if = container_of(p, struct __fman_if, __if);
341
342 assert(fman_ccsr_map_fd != -1);
343 /* Disable loopback mode */
344 if ((__if->__if.is_memac) && (__if->__if.is_rgmii)) {
345 unsigned int *ifmode =
346 &((struct memac_regs *)__if->ccsr_map)->if_mode;
347 out_be32(ifmode, in_be32(ifmode) & ~IF_MODE_RLP);
348 } else {
349 unsigned int *cmdcfg =
350 &((struct memac_regs *)__if->ccsr_map)->command_config;
351 out_be32(cmdcfg, in_be32(cmdcfg) & ~CMD_CFG_LOOPBACK_EN);
352 }
353 }
354
/* Attach buffer pool @bpid with buffer size @bufsize to the port's
 * first external buffer-pool entry (fmbm_ebmpi[0]).
 *
 * The register is rebuilt as: ebmpi_val_ace (fixed enable bits) OR the
 * preserved top bits of the old contents (ebmpi_mask) OR the pool id
 * shifted into bits 16+ OR the buffer size in the low bits.
 * NOTE(review): @bpid and @bufsize are not range-checked; values that
 * overflow their bit fields would corrupt adjacent fields -- confirm
 * field widths against the FMan BMI documentation.
 */
void
fman_if_set_bp(struct fman_if *fm_if, unsigned num __always_unused,
	       int bpid, size_t bufsize)
{
	u32 fmbm_ebmpi;
	u32 ebmpi_val_ace = 0xc0000000;
	u32 ebmpi_mask = 0xffc00000;

	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);

	assert(fman_ccsr_map_fd != -1);

	fmbm_ebmpi =
	       in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0]);
	fmbm_ebmpi = ebmpi_val_ace | (fmbm_ebmpi & ebmpi_mask) | (bpid << 16) |
		     (bufsize);

	out_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ebmpi[0],
		 fmbm_ebmpi);
}
375
376 int
377 fman_if_get_fc_threshold(struct fman_if *fm_if)
378 {
379 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
380 unsigned int *fmbm_mpd;
381
382 assert(fman_ccsr_map_fd != -1);
383
384 fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
385 return in_be32(fmbm_mpd);
386 }
387
388 int
389 fman_if_set_fc_threshold(struct fman_if *fm_if, u32 high_water,
390 u32 low_water, u32 bpid)
391 {
392 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
393 unsigned int *fmbm_mpd;
394
395 assert(fman_ccsr_map_fd != -1);
396
397 fmbm_mpd = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_mpd;
398 out_be32(fmbm_mpd, FMAN_ENABLE_BPOOL_DEPLETION);
399 return bm_pool_set_hw_threshold(bpid, low_water, high_water);
400
401 }
402
403 int
404 fman_if_get_fc_quanta(struct fman_if *fm_if)
405 {
406 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
407
408 assert(fman_ccsr_map_fd != -1);
409
410 return in_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0]);
411 }
412
413 int
414 fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
415 {
416 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
417
418 assert(fman_ccsr_map_fd != -1);
419
420 out_be32(&((struct memac_regs *)__if->ccsr_map)->pause_quanta[0],
421 pause_quanta);
422 return 0;
423 }
424
425 int
426 fman_if_get_fdoff(struct fman_if *fm_if)
427 {
428 u32 fmbm_rebm;
429 int fdoff;
430
431 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
432
433 assert(fman_ccsr_map_fd != -1);
434
435 fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
436
437 fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff;
438
439 return fdoff;
440 }
441
442 void
443 fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid)
444 {
445 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
446
447 assert(fman_ccsr_map_fd != -1);
448
449 unsigned int *fmbm_refqid =
450 &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_refqid;
451 out_be32(fmbm_refqid, err_fqid);
452 }
453
/* Read back the port's internal-context (IC) copy parameters from
 * fmbm_ricp into @icp. Always returns 0.
 *
 * Field layout: iceof in bits 16-20, iciof in bits 8-11, icsz in
 * bits 0-4 (mirror of fman_if_set_ic_params).
 * NOTE(review): the shifts are offset by 4 from the field positions
 * (>> 12 for a bit-16 field, << 4 for icsz), which implies the
 * register fields are in 16-byte units while the struct holds byte
 * values -- confirm against the FMan BMI documentation.
 */
int
fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp)
{
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
	int val = 0;
	int iceof_mask = 0x001f0000;
	int icsz_mask = 0x0000001f;
	int iciof_mask = 0x00000f00;

	assert(fman_ccsr_map_fd != -1);

	unsigned int *fmbm_ricp =
		&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
	val = in_be32(fmbm_ricp);

	/* register field -> byte value (unit conversion, see above) */
	icp->iceof = (val & iceof_mask) >> 12;
	icp->iciof = (val & iciof_mask) >> 4;
	icp->icsz = (val & icsz_mask) << 4;

	return 0;
}
475
/* Program the port's internal-context (IC) copy parameters from @icp
 * into fmbm_ricp. Always returns 0.
 *
 * Field layout: iceof in bits 16-20, iciof in bits 8-11, icsz in
 * bits 0-4 (mirror of fman_if_get_ic_params).
 * NOTE(review): the shifts are offset by 4 from the field positions
 * (<< 12 for a bit-16 field, >> 4 for icsz), which implies the
 * register fields are in 16-byte units while the struct holds byte
 * values -- confirm against the FMan BMI documentation.
 */
int
fman_if_set_ic_params(struct fman_if *fm_if,
		      const struct fman_if_ic_params *icp)
{
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
	int val = 0;
	int iceof_mask = 0x001f0000;
	int icsz_mask = 0x0000001f;
	int iciof_mask = 0x00000f00;

	assert(fman_ccsr_map_fd != -1);

	/* byte value -> register field (unit conversion, see above) */
	val |= (icp->iceof << 12) & iceof_mask;
	val |= (icp->iciof << 4) & iciof_mask;
	val |= (icp->icsz >> 4) & icsz_mask;

	unsigned int *fmbm_ricp =
		&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp;
	out_be32(fmbm_ricp, val);

	return 0;
}
498
499 void
500 fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset)
501 {
502 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
503 unsigned int *fmbm_rebm;
504 int val = 0;
505 int fmbm_mask = 0x01ff0000;
506
507 val = fd_offset << FMAN_SP_EXT_BUF_MARG_START_SHIFT;
508
509 assert(fman_ccsr_map_fd != -1);
510
511 fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
512
513 out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
514 }
515
516 void
517 fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm)
518 {
519 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
520 unsigned int *reg_maxfrm;
521
522 assert(fman_ccsr_map_fd != -1);
523
524 reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
525
526 out_be32(reg_maxfrm, (in_be32(reg_maxfrm) & 0xFFFF0000) | max_frm);
527 }
528
529 uint16_t
530 fman_if_get_maxfrm(struct fman_if *fm_if)
531 {
532 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
533 unsigned int *reg_maxfrm;
534
535 assert(fman_ccsr_map_fd != -1);
536
537 reg_maxfrm = &((struct memac_regs *)__if->ccsr_map)->maxfrm;
538
539 return (in_be32(reg_maxfrm) | 0x0000FFFF);
540 }
541
542 /* MSB in fmbm_rebm register
543 * 0 - If BMI cannot store the frame in a single buffer it may select a buffer
544 * of smaller size and store the frame in scatter gather (S/G) buffers
545 * 1 - Scatter gather format is not enabled for frame storage. If BMI cannot
546 * store the frame in a single buffer, the frame is discarded.
547 */
548
549 int
550 fman_if_get_sg_enable(struct fman_if *fm_if)
551 {
552 u32 fmbm_rebm;
553
554 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
555
556 assert(fman_ccsr_map_fd != -1);
557
558 fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
559
560 return (fmbm_rebm & FMAN_SP_SG_DISABLE) ? 0 : 1;
561 }
562
563 void
564 fman_if_set_sg(struct fman_if *fm_if, int enable)
565 {
566 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
567 unsigned int *fmbm_rebm;
568 int val;
569 int fmbm_mask = FMAN_SP_SG_DISABLE;
570
571 if (enable)
572 val = 0;
573 else
574 val = FMAN_SP_SG_DISABLE;
575
576 assert(fman_ccsr_map_fd != -1);
577
578 fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
579
580 out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
581 }
582
583 void
584 fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia)
585 {
586 struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
587 unsigned int *fmqm_pndn;
588
589 assert(fman_ccsr_map_fd != -1);
590
591 fmqm_pndn = &((struct fman_port_qmi_regs *)__if->qmi_map)->fmqm_pndn;
592
593 out_be32(fmqm_pndn, nia);
594 }
595
/* Make the Rx port silently discard errored frames instead of
 * delivering them to the error frame queue.
 */
void
fman_if_discard_rx_errors(struct fman_if *fm_if)
{
	struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
	unsigned int *fmbm_rfsdm, *fmbm_rfsem;

	/* Clear the error enqueue mask so no error is reported upward. */
	fmbm_rfsem = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsem;
	out_be32(fmbm_rfsem, 0);

	/* Configure the discard mask to discard the error packets which have
	 * DMA errors, frame size errors, header errors etc. The mask
	 * 0x010CE3F0 is configured to discard all the errors which come in
	 * the FD[STATUS].
	 */
	fmbm_rfsdm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsdm;
	out_be32(fmbm_rfsdm, 0x010CE3F0);
}
611 }