1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 #include <linux/netlink.h>
16 struct mlxsw_sp_sb_pr
{
17 enum mlxsw_reg_sbpr_mode mode
;
23 struct mlxsw_cp_sb_occ
{
28 struct mlxsw_sp_sb_cm
{
32 struct mlxsw_cp_sb_occ occ
;
37 #define MLXSW_SP_SB_INFI -1U
39 struct mlxsw_sp_sb_pm
{
42 struct mlxsw_cp_sb_occ occ
;
45 struct mlxsw_sp_sb_mm
{
51 struct mlxsw_sp_sb_pool_des
{
52 enum mlxsw_reg_sbxx_dir dir
;
56 #define MLXSW_SP_SB_POOL_ING 0
57 #define MLXSW_SP_SB_POOL_EGR 4
58 #define MLXSW_SP_SB_POOL_EGR_MC 8
59 #define MLXSW_SP_SB_POOL_ING_CPU 9
60 #define MLXSW_SP_SB_POOL_EGR_CPU 10
62 static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess
[] = {
63 {MLXSW_REG_SBXX_DIR_INGRESS
, 0},
64 {MLXSW_REG_SBXX_DIR_INGRESS
, 1},
65 {MLXSW_REG_SBXX_DIR_INGRESS
, 2},
66 {MLXSW_REG_SBXX_DIR_INGRESS
, 3},
67 {MLXSW_REG_SBXX_DIR_EGRESS
, 0},
68 {MLXSW_REG_SBXX_DIR_EGRESS
, 1},
69 {MLXSW_REG_SBXX_DIR_EGRESS
, 2},
70 {MLXSW_REG_SBXX_DIR_EGRESS
, 3},
71 {MLXSW_REG_SBXX_DIR_EGRESS
, 15},
72 {MLXSW_REG_SBXX_DIR_INGRESS
, 4},
73 {MLXSW_REG_SBXX_DIR_EGRESS
, 4},
76 static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess
[] = {
77 {MLXSW_REG_SBXX_DIR_INGRESS
, 0},
78 {MLXSW_REG_SBXX_DIR_INGRESS
, 1},
79 {MLXSW_REG_SBXX_DIR_INGRESS
, 2},
80 {MLXSW_REG_SBXX_DIR_INGRESS
, 3},
81 {MLXSW_REG_SBXX_DIR_EGRESS
, 0},
82 {MLXSW_REG_SBXX_DIR_EGRESS
, 1},
83 {MLXSW_REG_SBXX_DIR_EGRESS
, 2},
84 {MLXSW_REG_SBXX_DIR_EGRESS
, 3},
85 {MLXSW_REG_SBXX_DIR_EGRESS
, 15},
86 {MLXSW_REG_SBXX_DIR_INGRESS
, 4},
87 {MLXSW_REG_SBXX_DIR_EGRESS
, 4},
90 #define MLXSW_SP_SB_ING_TC_COUNT 8
91 #define MLXSW_SP_SB_EG_TC_COUNT 16
93 struct mlxsw_sp_sb_port
{
94 struct mlxsw_sp_sb_cm ing_cms
[MLXSW_SP_SB_ING_TC_COUNT
];
95 struct mlxsw_sp_sb_cm eg_cms
[MLXSW_SP_SB_EG_TC_COUNT
];
96 struct mlxsw_sp_sb_pm
*pms
;
100 struct mlxsw_sp_sb_pr
*prs
;
101 struct mlxsw_sp_sb_port
*ports
;
103 u32 max_headroom_cells
;
107 struct mlxsw_sp_sb_vals
{
108 unsigned int pool_count
;
109 const struct mlxsw_sp_sb_pool_des
*pool_dess
;
110 const struct mlxsw_sp_sb_pm
*pms
;
111 const struct mlxsw_sp_sb_pm
*pms_cpu
;
112 const struct mlxsw_sp_sb_pr
*prs
;
113 const struct mlxsw_sp_sb_mm
*mms
;
114 const struct mlxsw_sp_sb_cm
*cms_ingress
;
115 const struct mlxsw_sp_sb_cm
*cms_egress
;
116 const struct mlxsw_sp_sb_cm
*cms_cpu
;
117 unsigned int mms_count
;
118 unsigned int cms_ingress_count
;
119 unsigned int cms_egress_count
;
120 unsigned int cms_cpu_count
;
123 u32
mlxsw_sp_cells_bytes(const struct mlxsw_sp
*mlxsw_sp
, u32 cells
)
125 return mlxsw_sp
->sb
->cell_size
* cells
;
128 u32
mlxsw_sp_bytes_cells(const struct mlxsw_sp
*mlxsw_sp
, u32 bytes
)
130 return DIV_ROUND_UP(bytes
, mlxsw_sp
->sb
->cell_size
);
133 u32
mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp
*mlxsw_sp
)
135 return mlxsw_sp
->sb
->max_headroom_cells
;
138 static struct mlxsw_sp_sb_pr
*mlxsw_sp_sb_pr_get(struct mlxsw_sp
*mlxsw_sp
,
141 return &mlxsw_sp
->sb
->prs
[pool_index
];
144 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff
, enum mlxsw_reg_sbxx_dir dir
)
146 if (dir
== MLXSW_REG_SBXX_DIR_INGRESS
)
147 return pg_buff
< MLXSW_SP_SB_ING_TC_COUNT
;
149 return pg_buff
< MLXSW_SP_SB_EG_TC_COUNT
;
152 static struct mlxsw_sp_sb_cm
*mlxsw_sp_sb_cm_get(struct mlxsw_sp
*mlxsw_sp
,
153 u8 local_port
, u8 pg_buff
,
154 enum mlxsw_reg_sbxx_dir dir
)
156 struct mlxsw_sp_sb_port
*sb_port
= &mlxsw_sp
->sb
->ports
[local_port
];
158 WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff
, dir
));
159 if (dir
== MLXSW_REG_SBXX_DIR_INGRESS
)
160 return &sb_port
->ing_cms
[pg_buff
];
162 return &sb_port
->eg_cms
[pg_buff
];
165 static struct mlxsw_sp_sb_pm
*mlxsw_sp_sb_pm_get(struct mlxsw_sp
*mlxsw_sp
,
166 u8 local_port
, u16 pool_index
)
168 return &mlxsw_sp
->sb
->ports
[local_port
].pms
[pool_index
];
171 static int mlxsw_sp_sb_pr_write(struct mlxsw_sp
*mlxsw_sp
, u16 pool_index
,
172 enum mlxsw_reg_sbpr_mode mode
,
173 u32 size
, bool infi_size
)
175 const struct mlxsw_sp_sb_pool_des
*des
=
176 &mlxsw_sp
->sb_vals
->pool_dess
[pool_index
];
177 char sbpr_pl
[MLXSW_REG_SBPR_LEN
];
178 struct mlxsw_sp_sb_pr
*pr
;
181 mlxsw_reg_sbpr_pack(sbpr_pl
, des
->pool
, des
->dir
, mode
,
183 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbpr
), sbpr_pl
);
188 size
= mlxsw_sp_bytes_cells(mlxsw_sp
, mlxsw_sp
->sb
->sb_size
);
189 pr
= mlxsw_sp_sb_pr_get(mlxsw_sp
, pool_index
);
195 static int mlxsw_sp_sb_cm_write(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
196 u8 pg_buff
, u32 min_buff
, u32 max_buff
,
197 bool infi_max
, u16 pool_index
)
199 const struct mlxsw_sp_sb_pool_des
*des
=
200 &mlxsw_sp
->sb_vals
->pool_dess
[pool_index
];
201 char sbcm_pl
[MLXSW_REG_SBCM_LEN
];
202 struct mlxsw_sp_sb_cm
*cm
;
205 mlxsw_reg_sbcm_pack(sbcm_pl
, local_port
, pg_buff
, des
->dir
,
206 min_buff
, max_buff
, infi_max
, des
->pool
);
207 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbcm
), sbcm_pl
);
211 if (mlxsw_sp_sb_cm_exists(pg_buff
, des
->dir
)) {
213 max_buff
= mlxsw_sp_bytes_cells(mlxsw_sp
,
214 mlxsw_sp
->sb
->sb_size
);
216 cm
= mlxsw_sp_sb_cm_get(mlxsw_sp
, local_port
, pg_buff
,
218 cm
->min_buff
= min_buff
;
219 cm
->max_buff
= max_buff
;
220 cm
->pool_index
= pool_index
;
225 static int mlxsw_sp_sb_pm_write(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
226 u16 pool_index
, u32 min_buff
, u32 max_buff
)
228 const struct mlxsw_sp_sb_pool_des
*des
=
229 &mlxsw_sp
->sb_vals
->pool_dess
[pool_index
];
230 char sbpm_pl
[MLXSW_REG_SBPM_LEN
];
231 struct mlxsw_sp_sb_pm
*pm
;
234 mlxsw_reg_sbpm_pack(sbpm_pl
, local_port
, des
->pool
, des
->dir
, false,
236 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbpm
), sbpm_pl
);
240 pm
= mlxsw_sp_sb_pm_get(mlxsw_sp
, local_port
, pool_index
);
241 pm
->min_buff
= min_buff
;
242 pm
->max_buff
= max_buff
;
246 static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
247 u16 pool_index
, struct list_head
*bulk_list
)
249 const struct mlxsw_sp_sb_pool_des
*des
=
250 &mlxsw_sp
->sb_vals
->pool_dess
[pool_index
];
251 char sbpm_pl
[MLXSW_REG_SBPM_LEN
];
253 mlxsw_reg_sbpm_pack(sbpm_pl
, local_port
, des
->pool
, des
->dir
,
255 return mlxsw_reg_trans_query(mlxsw_sp
->core
, MLXSW_REG(sbpm
), sbpm_pl
,
259 static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core
*mlxsw_core
,
260 char *sbpm_pl
, size_t sbpm_pl_len
,
261 unsigned long cb_priv
)
263 struct mlxsw_sp_sb_pm
*pm
= (struct mlxsw_sp_sb_pm
*) cb_priv
;
265 mlxsw_reg_sbpm_unpack(sbpm_pl
, &pm
->occ
.cur
, &pm
->occ
.max
);
268 static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
269 u16 pool_index
, struct list_head
*bulk_list
)
271 const struct mlxsw_sp_sb_pool_des
*des
=
272 &mlxsw_sp
->sb_vals
->pool_dess
[pool_index
];
273 char sbpm_pl
[MLXSW_REG_SBPM_LEN
];
274 struct mlxsw_sp_sb_pm
*pm
;
276 pm
= mlxsw_sp_sb_pm_get(mlxsw_sp
, local_port
, pool_index
);
277 mlxsw_reg_sbpm_pack(sbpm_pl
, local_port
, des
->pool
, des
->dir
,
279 return mlxsw_reg_trans_query(mlxsw_sp
->core
, MLXSW_REG(sbpm
), sbpm_pl
,
281 mlxsw_sp_sb_pm_occ_query_cb
,
285 /* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
286 #define MLXSW_SP_PB_HEADROOM 25632
287 #define MLXSW_SP_PB_UNUSED 8
289 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
292 [0] = MLXSW_SP_PB_HEADROOM
* mlxsw_sp_port
->mapping
.width
,
293 [9] = MLXSW_PORT_MAX_MTU
,
295 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
296 char pbmc_pl
[MLXSW_REG_PBMC_LEN
];
299 mlxsw_reg_pbmc_pack(pbmc_pl
, mlxsw_sp_port
->local_port
,
301 for (i
= 0; i
< ARRAY_SIZE(pbs
); i
++) {
302 u16 size
= mlxsw_sp_bytes_cells(mlxsw_sp
, pbs
[i
]);
304 if (i
== MLXSW_SP_PB_UNUSED
)
306 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl
, i
, size
);
308 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl
,
309 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX
, 0);
310 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
313 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
315 char pptb_pl
[MLXSW_REG_PPTB_LEN
];
318 mlxsw_reg_pptb_pack(pptb_pl
, mlxsw_sp_port
->local_port
);
319 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
320 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl
, i
, 0);
321 return mlxsw_reg_write(mlxsw_sp_port
->mlxsw_sp
->core
, MLXSW_REG(pptb
),
/* Set up the port's headroom: size the port buffers first, then map all
 * priorities to buffer 0.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
335 static int mlxsw_sp_sb_port_init(struct mlxsw_sp
*mlxsw_sp
,
336 struct mlxsw_sp_sb_port
*sb_port
)
338 struct mlxsw_sp_sb_pm
*pms
;
340 pms
= kcalloc(mlxsw_sp
->sb_vals
->pool_count
, sizeof(*pms
),
348 static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port
*sb_port
)
353 static int mlxsw_sp_sb_ports_init(struct mlxsw_sp
*mlxsw_sp
)
355 unsigned int max_ports
= mlxsw_core_max_ports(mlxsw_sp
->core
);
356 struct mlxsw_sp_sb_pr
*prs
;
360 mlxsw_sp
->sb
->ports
= kcalloc(max_ports
,
361 sizeof(struct mlxsw_sp_sb_port
),
363 if (!mlxsw_sp
->sb
->ports
)
366 prs
= kcalloc(mlxsw_sp
->sb_vals
->pool_count
, sizeof(*prs
),
372 mlxsw_sp
->sb
->prs
= prs
;
374 for (i
= 0; i
< max_ports
; i
++) {
375 err
= mlxsw_sp_sb_port_init(mlxsw_sp
, &mlxsw_sp
->sb
->ports
[i
]);
377 goto err_sb_port_init
;
383 for (i
--; i
>= 0; i
--)
384 mlxsw_sp_sb_port_fini(&mlxsw_sp
->sb
->ports
[i
]);
385 kfree(mlxsw_sp
->sb
->prs
);
387 kfree(mlxsw_sp
->sb
->ports
);
391 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp
*mlxsw_sp
)
393 int max_ports
= mlxsw_core_max_ports(mlxsw_sp
->core
);
396 for (i
= max_ports
- 1; i
>= 0; i
--)
397 mlxsw_sp_sb_port_fini(&mlxsw_sp
->sb
->ports
[i
]);
398 kfree(mlxsw_sp
->sb
->prs
);
399 kfree(mlxsw_sp
->sb
->ports
);
402 #define MLXSW_SP_SB_PR(_mode, _size) \
408 #define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size) \
412 .freeze_mode = _freeze_mode, \
413 .freeze_size = _freeze_size, \
416 #define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000
417 #define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000
418 #define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)
420 /* Order according to mlxsw_sp1_sb_pool_dess */
421 static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs
[] = {
422 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC
,
423 MLXSW_SP1_SB_PR_INGRESS_SIZE
),
424 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC
, 0),
425 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC
, 0),
426 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC
, 0),
427 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC
,
428 MLXSW_SP1_SB_PR_EGRESS_SIZE
, true, false),
429 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC
, 0),
430 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC
, 0),
431 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC
, 0),
432 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC
, MLXSW_SP_SB_INFI
,
434 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC
,
435 MLXSW_SP1_SB_PR_CPU_SIZE
, true, false),
436 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC
,
437 MLXSW_SP1_SB_PR_CPU_SIZE
, true, false),
440 #define MLXSW_SP2_SB_PR_INGRESS_SIZE 35297568
441 #define MLXSW_SP2_SB_PR_EGRESS_SIZE 35297568
442 #define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
444 /* Order according to mlxsw_sp2_sb_pool_dess */
445 static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs
[] = {
446 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC
,
447 MLXSW_SP2_SB_PR_INGRESS_SIZE
),
448 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC
, 0),
449 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC
, 0),
450 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC
, 0),
451 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC
,
452 MLXSW_SP2_SB_PR_EGRESS_SIZE
, true, false),
453 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC
, 0),
454 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC
, 0),
455 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC
, 0),
456 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC
, MLXSW_SP_SB_INFI
,
458 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC
,
459 MLXSW_SP2_SB_PR_CPU_SIZE
, true, false),
460 MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC
,
461 MLXSW_SP2_SB_PR_CPU_SIZE
, true, false),
464 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp
*mlxsw_sp
,
465 const struct mlxsw_sp_sb_pr
*prs
,
471 for (i
= 0; i
< prs_len
; i
++) {
472 u32 size
= prs
[i
].size
;
475 if (size
== MLXSW_SP_SB_INFI
) {
476 err
= mlxsw_sp_sb_pr_write(mlxsw_sp
, i
, prs
[i
].mode
,
479 size_cells
= mlxsw_sp_bytes_cells(mlxsw_sp
, size
);
480 err
= mlxsw_sp_sb_pr_write(mlxsw_sp
, i
, prs
[i
].mode
,
489 #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
491 .min_buff = _min_buff, \
492 .max_buff = _max_buff, \
493 .pool_index = _pool, \
496 #define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff) \
498 .min_buff = _min_buff, \
499 .max_buff = _max_buff, \
500 .pool_index = MLXSW_SP_SB_POOL_ING, \
503 #define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff) \
505 .min_buff = _min_buff, \
506 .max_buff = _max_buff, \
507 .pool_index = MLXSW_SP_SB_POOL_EGR, \
510 #define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff) \
512 .min_buff = _min_buff, \
513 .max_buff = _max_buff, \
514 .pool_index = MLXSW_SP_SB_POOL_EGR_MC, \
515 .freeze_pool = true, \
516 .freeze_thresh = true, \
519 static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress
[] = {
520 MLXSW_SP_SB_CM_ING(10000, 8),
521 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
522 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
523 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
524 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
525 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
526 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
527 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
528 MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
529 MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU
),
532 static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress
[] = {
533 MLXSW_SP_SB_CM_ING(0, 7),
534 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
535 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
536 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
537 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
538 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
539 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
540 MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
541 MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
542 MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU
),
545 static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress
[] = {
546 MLXSW_SP_SB_CM_EGR(1500, 9),
547 MLXSW_SP_SB_CM_EGR(1500, 9),
548 MLXSW_SP_SB_CM_EGR(1500, 9),
549 MLXSW_SP_SB_CM_EGR(1500, 9),
550 MLXSW_SP_SB_CM_EGR(1500, 9),
551 MLXSW_SP_SB_CM_EGR(1500, 9),
552 MLXSW_SP_SB_CM_EGR(1500, 9),
553 MLXSW_SP_SB_CM_EGR(1500, 9),
554 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
555 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
556 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
557 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
558 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
559 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
560 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
561 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
562 MLXSW_SP_SB_CM_EGR(1, 0xff),
565 static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress
[] = {
566 MLXSW_SP_SB_CM_EGR(0, 7),
567 MLXSW_SP_SB_CM_EGR(0, 7),
568 MLXSW_SP_SB_CM_EGR(0, 7),
569 MLXSW_SP_SB_CM_EGR(0, 7),
570 MLXSW_SP_SB_CM_EGR(0, 7),
571 MLXSW_SP_SB_CM_EGR(0, 7),
572 MLXSW_SP_SB_CM_EGR(0, 7),
573 MLXSW_SP_SB_CM_EGR(0, 7),
574 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
575 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
576 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
577 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
578 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
579 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
580 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
581 MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI
),
582 MLXSW_SP_SB_CM_EGR(1, 0xff),
585 #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)
587 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms
[] = {
588 MLXSW_SP_CPU_PORT_SB_CM
,
589 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU
),
590 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU
),
591 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU
),
592 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU
),
593 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU
),
594 MLXSW_SP_CPU_PORT_SB_CM
,
595 MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU
),
596 MLXSW_SP_CPU_PORT_SB_CM
,
597 MLXSW_SP_CPU_PORT_SB_CM
,
598 MLXSW_SP_CPU_PORT_SB_CM
,
599 MLXSW_SP_CPU_PORT_SB_CM
,
600 MLXSW_SP_CPU_PORT_SB_CM
,
601 MLXSW_SP_CPU_PORT_SB_CM
,
602 MLXSW_SP_CPU_PORT_SB_CM
,
603 MLXSW_SP_CPU_PORT_SB_CM
,
604 MLXSW_SP_CPU_PORT_SB_CM
,
605 MLXSW_SP_CPU_PORT_SB_CM
,
606 MLXSW_SP_CPU_PORT_SB_CM
,
607 MLXSW_SP_CPU_PORT_SB_CM
,
608 MLXSW_SP_CPU_PORT_SB_CM
,
609 MLXSW_SP_CPU_PORT_SB_CM
,
610 MLXSW_SP_CPU_PORT_SB_CM
,
611 MLXSW_SP_CPU_PORT_SB_CM
,
612 MLXSW_SP_CPU_PORT_SB_CM
,
613 MLXSW_SP_CPU_PORT_SB_CM
,
614 MLXSW_SP_CPU_PORT_SB_CM
,
615 MLXSW_SP_CPU_PORT_SB_CM
,
616 MLXSW_SP_CPU_PORT_SB_CM
,
617 MLXSW_SP_CPU_PORT_SB_CM
,
618 MLXSW_SP_CPU_PORT_SB_CM
,
619 MLXSW_SP_CPU_PORT_SB_CM
,
623 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp
*mlxsw_sp
, u16 pool_index
)
625 struct mlxsw_sp_sb_pr
*pr
= mlxsw_sp_sb_pr_get(mlxsw_sp
, pool_index
);
627 return pr
->mode
== MLXSW_REG_SBPR_MODE_STATIC
;
630 static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
631 enum mlxsw_reg_sbxx_dir dir
,
632 const struct mlxsw_sp_sb_cm
*cms
,
635 const struct mlxsw_sp_sb_vals
*sb_vals
= mlxsw_sp
->sb_vals
;
639 for (i
= 0; i
< cms_len
; i
++) {
640 const struct mlxsw_sp_sb_cm
*cm
;
644 if (i
== 8 && dir
== MLXSW_REG_SBXX_DIR_INGRESS
)
645 continue; /* PG number 8 does not exist, skip it */
647 if (WARN_ON(sb_vals
->pool_dess
[cm
->pool_index
].dir
!= dir
))
650 min_buff
= mlxsw_sp_bytes_cells(mlxsw_sp
, cm
->min_buff
);
651 max_buff
= cm
->max_buff
;
652 if (max_buff
== MLXSW_SP_SB_INFI
) {
653 err
= mlxsw_sp_sb_cm_write(mlxsw_sp
, local_port
, i
,
655 true, cm
->pool_index
);
657 if (mlxsw_sp_sb_pool_is_static(mlxsw_sp
,
659 max_buff
= mlxsw_sp_bytes_cells(mlxsw_sp
,
661 err
= mlxsw_sp_sb_cm_write(mlxsw_sp
, local_port
, i
,
663 false, cm
->pool_index
);
671 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
673 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
676 err
= __mlxsw_sp_sb_cms_init(mlxsw_sp
,
677 mlxsw_sp_port
->local_port
,
678 MLXSW_REG_SBXX_DIR_INGRESS
,
679 mlxsw_sp
->sb_vals
->cms_ingress
,
680 mlxsw_sp
->sb_vals
->cms_ingress_count
);
683 return __mlxsw_sp_sb_cms_init(mlxsw_sp_port
->mlxsw_sp
,
684 mlxsw_sp_port
->local_port
,
685 MLXSW_REG_SBXX_DIR_EGRESS
,
686 mlxsw_sp
->sb_vals
->cms_egress
,
687 mlxsw_sp
->sb_vals
->cms_egress_count
);
690 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp
*mlxsw_sp
)
692 return __mlxsw_sp_sb_cms_init(mlxsw_sp
, 0, MLXSW_REG_SBXX_DIR_EGRESS
,
693 mlxsw_sp
->sb_vals
->cms_cpu
,
694 mlxsw_sp
->sb_vals
->cms_cpu_count
);
697 #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
699 .min_buff = _min_buff, \
700 .max_buff = _max_buff, \
703 /* Order according to mlxsw_sp1_sb_pool_dess */
704 static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms
[] = {
705 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX
),
706 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
707 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
708 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
709 MLXSW_SP_SB_PM(0, 7),
710 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
711 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
712 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
713 MLXSW_SP_SB_PM(10000, 90000),
714 MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
715 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
718 /* Order according to mlxsw_sp2_sb_pool_dess */
719 static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms
[] = {
720 MLXSW_SP_SB_PM(0, 7),
721 MLXSW_SP_SB_PM(0, 0),
722 MLXSW_SP_SB_PM(0, 0),
723 MLXSW_SP_SB_PM(0, 0),
724 MLXSW_SP_SB_PM(0, 7),
725 MLXSW_SP_SB_PM(0, 0),
726 MLXSW_SP_SB_PM(0, 0),
727 MLXSW_SP_SB_PM(0, 0),
728 MLXSW_SP_SB_PM(10000, 90000),
729 MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
730 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
),
733 /* Order according to mlxsw_sp*_sb_pool_dess */
734 static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms
[] = {
735 MLXSW_SP_SB_PM(0, 0),
736 MLXSW_SP_SB_PM(0, 0),
737 MLXSW_SP_SB_PM(0, 0),
738 MLXSW_SP_SB_PM(0, 0),
739 MLXSW_SP_SB_PM(0, 0),
740 MLXSW_SP_SB_PM(0, 0),
741 MLXSW_SP_SB_PM(0, 0),
742 MLXSW_SP_SB_PM(0, 0),
743 MLXSW_SP_SB_PM(0, 90000),
744 MLXSW_SP_SB_PM(0, 0),
745 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX
),
748 static int mlxsw_sp_sb_pms_init(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
749 const struct mlxsw_sp_sb_pm
*pms
,
754 for (i
= 0; i
< mlxsw_sp
->sb_vals
->pool_count
; i
++) {
755 const struct mlxsw_sp_sb_pm
*pm
= &pms
[i
];
756 const struct mlxsw_sp_sb_pool_des
*des
;
760 des
= &mlxsw_sp
->sb_vals
->pool_dess
[i
];
761 if (skip_ingress
&& des
->dir
== MLXSW_REG_SBXX_DIR_INGRESS
)
764 min_buff
= mlxsw_sp_bytes_cells(mlxsw_sp
, pm
->min_buff
);
765 max_buff
= pm
->max_buff
;
766 if (mlxsw_sp_sb_pool_is_static(mlxsw_sp
, i
))
767 max_buff
= mlxsw_sp_bytes_cells(mlxsw_sp
, max_buff
);
768 err
= mlxsw_sp_sb_pm_write(mlxsw_sp
, local_port
, i
, min_buff
,
776 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
778 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
780 return mlxsw_sp_sb_pms_init(mlxsw_sp
, mlxsw_sp_port
->local_port
,
781 mlxsw_sp
->sb_vals
->pms
, false);
784 static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp
*mlxsw_sp
)
786 return mlxsw_sp_sb_pms_init(mlxsw_sp
, 0, mlxsw_sp
->sb_vals
->pms_cpu
,
790 #define MLXSW_SP_SB_MM(_min_buff, _max_buff) \
792 .min_buff = _min_buff, \
793 .max_buff = _max_buff, \
794 .pool_index = MLXSW_SP_SB_POOL_EGR, \
797 static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms
[] = {
798 MLXSW_SP_SB_MM(0, 6),
799 MLXSW_SP_SB_MM(0, 6),
800 MLXSW_SP_SB_MM(0, 6),
801 MLXSW_SP_SB_MM(0, 6),
802 MLXSW_SP_SB_MM(0, 6),
803 MLXSW_SP_SB_MM(0, 6),
804 MLXSW_SP_SB_MM(0, 6),
805 MLXSW_SP_SB_MM(0, 6),
806 MLXSW_SP_SB_MM(0, 6),
807 MLXSW_SP_SB_MM(0, 6),
808 MLXSW_SP_SB_MM(0, 6),
809 MLXSW_SP_SB_MM(0, 6),
810 MLXSW_SP_SB_MM(0, 6),
811 MLXSW_SP_SB_MM(0, 6),
812 MLXSW_SP_SB_MM(0, 6),
815 static int mlxsw_sp_sb_mms_init(struct mlxsw_sp
*mlxsw_sp
)
817 char sbmm_pl
[MLXSW_REG_SBMM_LEN
];
821 for (i
= 0; i
< mlxsw_sp
->sb_vals
->mms_count
; i
++) {
822 const struct mlxsw_sp_sb_pool_des
*des
;
823 const struct mlxsw_sp_sb_mm
*mc
;
826 mc
= &mlxsw_sp
->sb_vals
->mms
[i
];
827 des
= &mlxsw_sp
->sb_vals
->pool_dess
[mc
->pool_index
];
828 /* All pools used by sb_mm's are initialized using dynamic
829 * thresholds, therefore 'max_buff' isn't specified in cells.
831 min_buff
= mlxsw_sp_bytes_cells(mlxsw_sp
, mc
->min_buff
);
832 mlxsw_reg_sbmm_pack(sbmm_pl
, i
, min_buff
, mc
->max_buff
,
834 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbmm
), sbmm_pl
);
841 static void mlxsw_sp_pool_count(struct mlxsw_sp
*mlxsw_sp
,
842 u16
*p_ingress_len
, u16
*p_egress_len
)
846 for (i
= 0; i
< mlxsw_sp
->sb_vals
->pool_count
; ++i
) {
847 if (mlxsw_sp
->sb_vals
->pool_dess
[i
].dir
==
848 MLXSW_REG_SBXX_DIR_INGRESS
)
854 WARN(*p_egress_len
== 0, "No egress pools\n");
857 const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals
= {
858 .pool_count
= ARRAY_SIZE(mlxsw_sp1_sb_pool_dess
),
859 .pool_dess
= mlxsw_sp1_sb_pool_dess
,
860 .pms
= mlxsw_sp1_sb_pms
,
861 .pms_cpu
= mlxsw_sp_cpu_port_sb_pms
,
862 .prs
= mlxsw_sp1_sb_prs
,
863 .mms
= mlxsw_sp_sb_mms
,
864 .cms_ingress
= mlxsw_sp1_sb_cms_ingress
,
865 .cms_egress
= mlxsw_sp1_sb_cms_egress
,
866 .cms_cpu
= mlxsw_sp_cpu_port_sb_cms
,
867 .mms_count
= ARRAY_SIZE(mlxsw_sp_sb_mms
),
868 .cms_ingress_count
= ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress
),
869 .cms_egress_count
= ARRAY_SIZE(mlxsw_sp1_sb_cms_egress
),
870 .cms_cpu_count
= ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms
),
873 const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals
= {
874 .pool_count
= ARRAY_SIZE(mlxsw_sp2_sb_pool_dess
),
875 .pool_dess
= mlxsw_sp2_sb_pool_dess
,
876 .pms
= mlxsw_sp2_sb_pms
,
877 .pms_cpu
= mlxsw_sp_cpu_port_sb_pms
,
878 .prs
= mlxsw_sp2_sb_prs
,
879 .mms
= mlxsw_sp_sb_mms
,
880 .cms_ingress
= mlxsw_sp2_sb_cms_ingress
,
881 .cms_egress
= mlxsw_sp2_sb_cms_egress
,
882 .cms_cpu
= mlxsw_sp_cpu_port_sb_cms
,
883 .mms_count
= ARRAY_SIZE(mlxsw_sp_sb_mms
),
884 .cms_ingress_count
= ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress
),
885 .cms_egress_count
= ARRAY_SIZE(mlxsw_sp2_sb_cms_egress
),
886 .cms_cpu_count
= ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms
),
889 int mlxsw_sp_buffers_init(struct mlxsw_sp
*mlxsw_sp
)
891 u32 max_headroom_size
;
892 u16 ing_pool_count
= 0;
893 u16 eg_pool_count
= 0;
896 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, CELL_SIZE
))
899 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_BUFFER_SIZE
))
902 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_HEADROOM_SIZE
))
905 mlxsw_sp
->sb
= kzalloc(sizeof(*mlxsw_sp
->sb
), GFP_KERNEL
);
908 mlxsw_sp
->sb
->cell_size
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, CELL_SIZE
);
909 mlxsw_sp
->sb
->sb_size
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
911 max_headroom_size
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
913 /* Round down, because this limit must not be overstepped. */
914 mlxsw_sp
->sb
->max_headroom_cells
= max_headroom_size
/
915 mlxsw_sp
->sb
->cell_size
;
917 err
= mlxsw_sp_sb_ports_init(mlxsw_sp
);
919 goto err_sb_ports_init
;
920 err
= mlxsw_sp_sb_prs_init(mlxsw_sp
, mlxsw_sp
->sb_vals
->prs
,
921 mlxsw_sp
->sb_vals
->pool_count
);
923 goto err_sb_prs_init
;
924 err
= mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp
);
926 goto err_sb_cpu_port_sb_cms_init
;
927 err
= mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp
);
929 goto err_sb_cpu_port_pms_init
;
930 err
= mlxsw_sp_sb_mms_init(mlxsw_sp
);
932 goto err_sb_mms_init
;
933 mlxsw_sp_pool_count(mlxsw_sp
, &ing_pool_count
, &eg_pool_count
);
934 err
= devlink_sb_register(priv_to_devlink(mlxsw_sp
->core
), 0,
935 mlxsw_sp
->sb
->sb_size
,
938 MLXSW_SP_SB_ING_TC_COUNT
,
939 MLXSW_SP_SB_EG_TC_COUNT
);
941 goto err_devlink_sb_register
;
945 err_devlink_sb_register
:
947 err_sb_cpu_port_pms_init
:
948 err_sb_cpu_port_sb_cms_init
:
950 mlxsw_sp_sb_ports_fini(mlxsw_sp
);
956 void mlxsw_sp_buffers_fini(struct mlxsw_sp
*mlxsw_sp
)
958 devlink_sb_unregister(priv_to_devlink(mlxsw_sp
->core
), 0);
959 mlxsw_sp_sb_ports_fini(mlxsw_sp
);
963 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
967 err
= mlxsw_sp_port_headroom_init(mlxsw_sp_port
);
970 err
= mlxsw_sp_port_sb_cms_init(mlxsw_sp_port
);
973 err
= mlxsw_sp_port_sb_pms_init(mlxsw_sp_port
);
978 int mlxsw_sp_sb_pool_get(struct mlxsw_core
*mlxsw_core
,
979 unsigned int sb_index
, u16 pool_index
,
980 struct devlink_sb_pool_info
*pool_info
)
982 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
983 enum mlxsw_reg_sbxx_dir dir
;
984 struct mlxsw_sp_sb_pr
*pr
;
986 dir
= mlxsw_sp
->sb_vals
->pool_dess
[pool_index
].dir
;
987 pr
= mlxsw_sp_sb_pr_get(mlxsw_sp
, pool_index
);
988 pool_info
->pool_type
= (enum devlink_sb_pool_type
) dir
;
989 pool_info
->size
= mlxsw_sp_cells_bytes(mlxsw_sp
, pr
->size
);
990 pool_info
->threshold_type
= (enum devlink_sb_threshold_type
) pr
->mode
;
991 pool_info
->cell_size
= mlxsw_sp
->sb
->cell_size
;
995 int mlxsw_sp_sb_pool_set(struct mlxsw_core
*mlxsw_core
,
996 unsigned int sb_index
, u16 pool_index
, u32 size
,
997 enum devlink_sb_threshold_type threshold_type
,
998 struct netlink_ext_ack
*extack
)
1000 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
1001 u32 pool_size
= mlxsw_sp_bytes_cells(mlxsw_sp
, size
);
1002 const struct mlxsw_sp_sb_pr
*pr
;
1003 enum mlxsw_reg_sbpr_mode mode
;
1005 mode
= (enum mlxsw_reg_sbpr_mode
) threshold_type
;
1006 pr
= &mlxsw_sp
->sb_vals
->prs
[pool_index
];
1008 if (size
> MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_BUFFER_SIZE
)) {
1009 NL_SET_ERR_MSG_MOD(extack
, "Exceeded shared buffer size");
1013 if (pr
->freeze_mode
&& pr
->mode
!= mode
) {
1014 NL_SET_ERR_MSG_MOD(extack
, "Changing this pool's threshold type is forbidden");
1018 if (pr
->freeze_size
&& pr
->size
!= size
) {
1019 NL_SET_ERR_MSG_MOD(extack
, "Changing this pool's size is forbidden");
1023 return mlxsw_sp_sb_pr_write(mlxsw_sp
, pool_index
, mode
,
1027 #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
1029 static u32
mlxsw_sp_sb_threshold_out(struct mlxsw_sp
*mlxsw_sp
, u16 pool_index
,
1032 struct mlxsw_sp_sb_pr
*pr
= mlxsw_sp_sb_pr_get(mlxsw_sp
, pool_index
);
1034 if (pr
->mode
== MLXSW_REG_SBPR_MODE_DYNAMIC
)
1035 return max_buff
- MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET
;
1036 return mlxsw_sp_cells_bytes(mlxsw_sp
, max_buff
);
1039 static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp
*mlxsw_sp
, u16 pool_index
,
1040 u32 threshold
, u32
*p_max_buff
,
1041 struct netlink_ext_ack
*extack
)
1043 struct mlxsw_sp_sb_pr
*pr
= mlxsw_sp_sb_pr_get(mlxsw_sp
, pool_index
);
1045 if (pr
->mode
== MLXSW_REG_SBPR_MODE_DYNAMIC
) {
1048 val
= threshold
+ MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET
;
1049 if (val
< MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN
||
1050 val
> MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX
) {
1051 NL_SET_ERR_MSG_MOD(extack
, "Invalid dynamic threshold value");
1056 *p_max_buff
= mlxsw_sp_bytes_cells(mlxsw_sp
, threshold
);
1061 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port
*mlxsw_core_port
,
1062 unsigned int sb_index
, u16 pool_index
,
1065 struct mlxsw_sp_port
*mlxsw_sp_port
=
1066 mlxsw_core_port_driver_priv(mlxsw_core_port
);
1067 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1068 u8 local_port
= mlxsw_sp_port
->local_port
;
1069 struct mlxsw_sp_sb_pm
*pm
= mlxsw_sp_sb_pm_get(mlxsw_sp
, local_port
,
1072 *p_threshold
= mlxsw_sp_sb_threshold_out(mlxsw_sp
, pool_index
,
1077 int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port
*mlxsw_core_port
,
1078 unsigned int sb_index
, u16 pool_index
,
1079 u32 threshold
, struct netlink_ext_ack
*extack
)
1081 struct mlxsw_sp_port
*mlxsw_sp_port
=
1082 mlxsw_core_port_driver_priv(mlxsw_core_port
);
1083 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1084 u8 local_port
= mlxsw_sp_port
->local_port
;
1088 err
= mlxsw_sp_sb_threshold_in(mlxsw_sp
, pool_index
,
1089 threshold
, &max_buff
, extack
);
1093 return mlxsw_sp_sb_pm_write(mlxsw_sp
, local_port
, pool_index
,
1097 int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port
*mlxsw_core_port
,
1098 unsigned int sb_index
, u16 tc_index
,
1099 enum devlink_sb_pool_type pool_type
,
1100 u16
*p_pool_index
, u32
*p_threshold
)
1102 struct mlxsw_sp_port
*mlxsw_sp_port
=
1103 mlxsw_core_port_driver_priv(mlxsw_core_port
);
1104 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1105 u8 local_port
= mlxsw_sp_port
->local_port
;
1106 u8 pg_buff
= tc_index
;
1107 enum mlxsw_reg_sbxx_dir dir
= (enum mlxsw_reg_sbxx_dir
) pool_type
;
1108 struct mlxsw_sp_sb_cm
*cm
= mlxsw_sp_sb_cm_get(mlxsw_sp
, local_port
,
1111 *p_threshold
= mlxsw_sp_sb_threshold_out(mlxsw_sp
, cm
->pool_index
,
1113 *p_pool_index
= cm
->pool_index
;
1117 int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port
*mlxsw_core_port
,
1118 unsigned int sb_index
, u16 tc_index
,
1119 enum devlink_sb_pool_type pool_type
,
1120 u16 pool_index
, u32 threshold
,
1121 struct netlink_ext_ack
*extack
)
1123 struct mlxsw_sp_port
*mlxsw_sp_port
=
1124 mlxsw_core_port_driver_priv(mlxsw_core_port
);
1125 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1126 u8 local_port
= mlxsw_sp_port
->local_port
;
1127 const struct mlxsw_sp_sb_cm
*cm
;
1128 u8 pg_buff
= tc_index
;
1129 enum mlxsw_reg_sbxx_dir dir
= (enum mlxsw_reg_sbxx_dir
) pool_type
;
1133 if (dir
!= mlxsw_sp
->sb_vals
->pool_dess
[pool_index
].dir
) {
1134 NL_SET_ERR_MSG_MOD(extack
, "Binding egress TC to ingress pool and vice versa is forbidden");
1138 if (dir
== MLXSW_REG_SBXX_DIR_INGRESS
)
1139 cm
= &mlxsw_sp
->sb_vals
->cms_ingress
[tc_index
];
1141 cm
= &mlxsw_sp
->sb_vals
->cms_egress
[tc_index
];
1143 if (cm
->freeze_pool
&& cm
->pool_index
!= pool_index
) {
1144 NL_SET_ERR_MSG_MOD(extack
, "Binding this TC to a different pool is forbidden");
1148 if (cm
->freeze_thresh
&& cm
->max_buff
!= threshold
) {
1149 NL_SET_ERR_MSG_MOD(extack
, "Changing this TC's threshold is forbidden");
1153 err
= mlxsw_sp_sb_threshold_in(mlxsw_sp
, pool_index
,
1154 threshold
, &max_buff
, extack
);
1158 return mlxsw_sp_sb_cm_write(mlxsw_sp
, local_port
, pg_buff
,
1159 0, max_buff
, false, pool_index
);
1162 #define MASKED_COUNT_MAX \
1163 (MLXSW_REG_SBSR_REC_MAX_COUNT / \
1164 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
1166 struct mlxsw_sp_sb_sr_occ_query_cb_ctx
{
1171 static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core
*mlxsw_core
,
1172 char *sbsr_pl
, size_t sbsr_pl_len
,
1173 unsigned long cb_priv
)
1175 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
1176 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx
;
1180 struct mlxsw_sp_sb_cm
*cm
;
1183 memcpy(&cb_ctx
, &cb_priv
, sizeof(cb_ctx
));
1186 for (local_port
= cb_ctx
.local_port_1
;
1187 local_port
< mlxsw_core_max_ports(mlxsw_core
); local_port
++) {
1188 if (!mlxsw_sp
->ports
[local_port
])
1190 for (i
= 0; i
< MLXSW_SP_SB_ING_TC_COUNT
; i
++) {
1191 cm
= mlxsw_sp_sb_cm_get(mlxsw_sp
, local_port
, i
,
1192 MLXSW_REG_SBXX_DIR_INGRESS
);
1193 mlxsw_reg_sbsr_rec_unpack(sbsr_pl
, rec_index
++,
1194 &cm
->occ
.cur
, &cm
->occ
.max
);
1196 if (++masked_count
== cb_ctx
.masked_count
)
1200 for (local_port
= cb_ctx
.local_port_1
;
1201 local_port
< mlxsw_core_max_ports(mlxsw_core
); local_port
++) {
1202 if (!mlxsw_sp
->ports
[local_port
])
1204 for (i
= 0; i
< MLXSW_SP_SB_EG_TC_COUNT
; i
++) {
1205 cm
= mlxsw_sp_sb_cm_get(mlxsw_sp
, local_port
, i
,
1206 MLXSW_REG_SBXX_DIR_EGRESS
);
1207 mlxsw_reg_sbsr_rec_unpack(sbsr_pl
, rec_index
++,
1208 &cm
->occ
.cur
, &cm
->occ
.max
);
1210 if (++masked_count
== cb_ctx
.masked_count
)
1215 int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core
*mlxsw_core
,
1216 unsigned int sb_index
)
1218 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
1219 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx
;
1220 unsigned long cb_priv
;
1221 LIST_HEAD(bulk_list
);
1230 sbsr_pl
= kmalloc(MLXSW_REG_SBSR_LEN
, GFP_KERNEL
);
1236 local_port_1
= local_port
;
1238 mlxsw_reg_sbsr_pack(sbsr_pl
, false);
1239 for (i
= 0; i
< MLXSW_SP_SB_ING_TC_COUNT
; i
++)
1240 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl
, i
, 1);
1241 for (i
= 0; i
< MLXSW_SP_SB_EG_TC_COUNT
; i
++)
1242 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl
, i
, 1);
1243 for (; local_port
< mlxsw_core_max_ports(mlxsw_core
); local_port
++) {
1244 if (!mlxsw_sp
->ports
[local_port
])
1246 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl
, local_port
, 1);
1247 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl
, local_port
, 1);
1248 for (i
= 0; i
< mlxsw_sp
->sb_vals
->pool_count
; i
++) {
1249 err
= mlxsw_sp_sb_pm_occ_query(mlxsw_sp
, local_port
, i
,
1254 if (++masked_count
== MASKED_COUNT_MAX
)
1259 cb_ctx
.masked_count
= masked_count
;
1260 cb_ctx
.local_port_1
= local_port_1
;
1261 memcpy(&cb_priv
, &cb_ctx
, sizeof(cb_ctx
));
1262 err
= mlxsw_reg_trans_query(mlxsw_core
, MLXSW_REG(sbsr
), sbsr_pl
,
1263 &bulk_list
, mlxsw_sp_sb_sr_occ_query_cb
,
1267 if (local_port
< mlxsw_core_max_ports(mlxsw_core
))
1271 err2
= mlxsw_reg_trans_bulk_wait(&bulk_list
);
1278 int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core
*mlxsw_core
,
1279 unsigned int sb_index
)
1281 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
1282 LIST_HEAD(bulk_list
);
1284 unsigned int masked_count
;
1290 sbsr_pl
= kmalloc(MLXSW_REG_SBSR_LEN
, GFP_KERNEL
);
1297 mlxsw_reg_sbsr_pack(sbsr_pl
, true);
1298 for (i
= 0; i
< MLXSW_SP_SB_ING_TC_COUNT
; i
++)
1299 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl
, i
, 1);
1300 for (i
= 0; i
< MLXSW_SP_SB_EG_TC_COUNT
; i
++)
1301 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl
, i
, 1);
1302 for (; local_port
< mlxsw_core_max_ports(mlxsw_core
); local_port
++) {
1303 if (!mlxsw_sp
->ports
[local_port
])
1305 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl
, local_port
, 1);
1306 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl
, local_port
, 1);
1307 for (i
= 0; i
< mlxsw_sp
->sb_vals
->pool_count
; i
++) {
1308 err
= mlxsw_sp_sb_pm_occ_clear(mlxsw_sp
, local_port
, i
,
1313 if (++masked_count
== MASKED_COUNT_MAX
)
1318 err
= mlxsw_reg_trans_query(mlxsw_core
, MLXSW_REG(sbsr
), sbsr_pl
,
1319 &bulk_list
, NULL
, 0);
1322 if (local_port
< mlxsw_core_max_ports(mlxsw_core
))
1326 err2
= mlxsw_reg_trans_bulk_wait(&bulk_list
);
1333 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port
*mlxsw_core_port
,
1334 unsigned int sb_index
, u16 pool_index
,
1335 u32
*p_cur
, u32
*p_max
)
1337 struct mlxsw_sp_port
*mlxsw_sp_port
=
1338 mlxsw_core_port_driver_priv(mlxsw_core_port
);
1339 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1340 u8 local_port
= mlxsw_sp_port
->local_port
;
1341 struct mlxsw_sp_sb_pm
*pm
= mlxsw_sp_sb_pm_get(mlxsw_sp
, local_port
,
1344 *p_cur
= mlxsw_sp_cells_bytes(mlxsw_sp
, pm
->occ
.cur
);
1345 *p_max
= mlxsw_sp_cells_bytes(mlxsw_sp
, pm
->occ
.max
);
1349 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port
*mlxsw_core_port
,
1350 unsigned int sb_index
, u16 tc_index
,
1351 enum devlink_sb_pool_type pool_type
,
1352 u32
*p_cur
, u32
*p_max
)
1354 struct mlxsw_sp_port
*mlxsw_sp_port
=
1355 mlxsw_core_port_driver_priv(mlxsw_core_port
);
1356 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1357 u8 local_port
= mlxsw_sp_port
->local_port
;
1358 u8 pg_buff
= tc_index
;
1359 enum mlxsw_reg_sbxx_dir dir
= (enum mlxsw_reg_sbxx_dir
) pool_type
;
1360 struct mlxsw_sp_sb_cm
*cm
= mlxsw_sp_sb_cm_get(mlxsw_sp
, local_port
,
1363 *p_cur
= mlxsw_sp_cells_bytes(mlxsw_sp
, cm
->occ
.cur
);
1364 *p_max
= mlxsw_sp_cells_bytes(mlxsw_sp
, cm
->occ
.max
);