1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
/* NOTE(review): this excerpt is sampled — struct bodies below appear
 * truncated; member lists may be incomplete in this view.
 */
/* Cached per-pool configuration, as last written via SBPR. */
15 struct mlxsw_sp_sb_pr {
16 enum mlxsw_reg_sbpr_mode mode;
/* Occupancy sample: current level and max watermark, in cells
 * (converted to bytes only at the devlink boundary).
 */
20 struct mlxsw_cp_sb_occ {
/* Cached per-{port, PG/TC} binding (SBCM) plus its occupancy. */
25 struct mlxsw_sp_sb_cm {
29 struct mlxsw_cp_sb_occ occ;
/* Cached per-{port, pool} quotas (SBPM) plus their occupancy. */
32 struct mlxsw_sp_sb_pm {
35 struct mlxsw_cp_sb_occ occ;
/* Number of pools and PG/TC slots tracked per direction. */
38 #define MLXSW_SP_SB_POOL_COUNT 4
39 #define MLXSW_SP_SB_TC_COUNT 8
/* Per-port cache; the first index of cms[]/pms[] is the
 * ingress/egress direction (enum mlxsw_reg_sbxx_dir).
 */
41 struct mlxsw_sp_sb_port {
42 struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
43 struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
/* Top-level shared-buffer state: pool configs per direction and a
 * dynamically allocated per-port array (see mlxsw_sp_sb_ports_init()).
 */
47 struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
48 struct mlxsw_sp_sb_port *ports;
/* Convert a buffer cell count to bytes using the device cell size. */
52 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
54 return mlxsw_sp->sb->cell_size * cells;
/* Convert bytes to cells, rounding up to a whole cell. */
57 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
59 return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
/* Look up the cached pool entry for a {pool, direction} pair. */
62 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
64 enum mlxsw_reg_sbxx_dir dir)
66 return &mlxsw_sp->sb->prs[dir][pool];
/* Look up the cached CM entry for {port, PG/TC, direction}. */
69 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
70 u8 local_port, u8 pg_buff,
71 enum mlxsw_reg_sbxx_dir dir)
73 return &mlxsw_sp->sb->ports[local_port].cms[dir][pg_buff];
/* Look up the cached PM entry for {port, pool, direction}. */
76 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
77 u8 local_port, u8 pool,
78 enum mlxsw_reg_sbxx_dir dir)
80 return &mlxsw_sp->sb->ports[local_port].pms[dir][pool];
/* Write a pool's mode and size (in cells) via the SBPR register,
 * then mirror the new values into the local cache on success.
 */
83 static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
84 enum mlxsw_reg_sbxx_dir dir,
85 enum mlxsw_reg_sbpr_mode mode, u32 size)
87 char sbpr_pl[MLXSW_REG_SBPR_LEN];
88 struct mlxsw_sp_sb_pr *pr;
91 mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
92 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
/* Hardware accepted the write; update the cached entry. */
96 pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
/* Bind {port, PG/TC} to a pool with min/max quotas via SBCM.
 * Only pg_buff values below MLXSW_SP_SB_TC_COUNT have cache slots;
 * higher buffers are written to hardware but not cached.
 */
102 static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
103 u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
104 u32 min_buff, u32 max_buff, u8 pool)
106 char sbcm_pl[MLXSW_REG_SBCM_LEN];
109 mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
110 min_buff, max_buff, pool);
111 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
114 if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
115 struct mlxsw_sp_sb_cm *cm;
117 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
118 cm->min_buff = min_buff;
119 cm->max_buff = max_buff;
/* Set per-{port, pool} quotas via SBPM and cache them.  The
 * 'false' argument matches the clear-bit position used by
 * mlxsw_sp_sb_pm_occ_clear() below — no watermark reset here.
 */
125 static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
126 u8 pool, enum mlxsw_reg_sbxx_dir dir,
127 u32 min_buff, u32 max_buff)
129 char sbpm_pl[MLXSW_REG_SBPM_LEN];
130 struct mlxsw_sp_sb_pm *pm;
133 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
135 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
139 pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
140 pm->min_buff = min_buff;
141 pm->max_buff = max_buff;
/* Queue a transacted SBPM access with the clear bit set, resetting
 * the {port, pool} max-occupancy watermark; completion is collected
 * via bulk_list.
 */
145 static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
146 u8 pool, enum mlxsw_reg_sbxx_dir dir,
147 struct list_head *bulk_list)
149 char sbpm_pl[MLXSW_REG_SBPM_LEN];
151 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
152 return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
/* Transaction completion callback: unpack current/max occupancy
 * into the cached PM entry smuggled through cb_priv.
 */
156 static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
157 char *sbpm_pl, size_t sbpm_pl_len,
158 unsigned long cb_priv)
160 struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
162 mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
/* Queue a transacted SBPM occupancy read (clear bit off); results
 * land in the cache via mlxsw_sp_sb_pm_occ_query_cb().
 */
165 static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
166 u8 pool, enum mlxsw_reg_sbxx_dir dir,
167 struct list_head *bulk_list)
169 char sbpm_pl[MLXSW_REG_SBPM_LEN];
170 struct mlxsw_sp_sb_pm *pm;
172 pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
173 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
174 return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
176 mlxsw_sp_sb_pm_occ_query_cb,
/* Per-port headroom buffer sizes in bytes, indexed by buffer number;
 * unlisted entries are implicitly zero-sized.
 */
180 static const u16 mlxsw_sp_pbs[] = {
181 [0] = 2 * ETH_FRAME_LEN,
182 [9] = 2 * MLXSW_PORT_MAX_MTU,
185 #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
/* Buffer 8 is not configured — skipped in the loop below. */
186 #define MLXSW_SP_PB_UNUSED 8
/* Program the port's headroom buffers (PBMC): convert each table
 * size to cells, skip the unused buffer, and zero the port shared
 * buffer entry.
 */
188 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
190 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
191 char pbmc_pl[MLXSW_REG_PBMC_LEN];
194 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
196 for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
197 u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);
199 if (i == MLXSW_SP_PB_UNUSED)
201 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
203 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
204 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
205 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
/* Map every switch priority to headroom buffer 0 via PPTB. */
208 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
210 char pptb_pl[MLXSW_REG_PPTB_LEN];
213 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
214 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
215 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
216 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
/* Full headroom setup: buffer sizes, then priority-to-buffer map. */
220 static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
224 err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
227 return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
/* Allocate the zero-initialized per-port shared-buffer cache,
 * one mlxsw_sp_sb_port per possible local port.
 */
230 static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
232 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
234 mlxsw_sp->sb->ports = kcalloc(max_ports,
235 sizeof(struct mlxsw_sp_sb_port),
237 if (!mlxsw_sp->sb->ports)
/* Release the per-port cache allocated above. */
242 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
244 kfree(mlxsw_sp->sb->ports);
/* Default shared-pool sizes, in bytes (converted to cells at
 * write time).  Pool 3 on ingress is the management pool.
 */
247 #define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000
248 #define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
249 #define MLXSW_SP_SB_PR_EGRESS_SIZE 13232000
251 #define MLXSW_SP_SB_PR(_mode, _size) \
/* Ingress pools: pool 0 carries data traffic, pools 1-2 are empty,
 * pool 3 is sized for management traffic.  All dynamic-threshold.
 */
257 static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
258 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
259 MLXSW_SP_SB_PR_INGRESS_SIZE),
260 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
261 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
262 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
263 MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
266 #define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
/* Egress pools: only pool 0 is sized; the rest are empty. */
268 static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
269 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
270 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
271 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
272 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
275 #define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
/* Write one direction's pool table, converting sizes to cells. */
277 static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
278 enum mlxsw_reg_sbxx_dir dir,
279 const struct mlxsw_sp_sb_pr *prs,
285 for (i = 0; i < prs_len; i++) {
286 u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);
288 err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size);
/* Initialize both the ingress and egress pool tables. */
295 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
299 err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
300 mlxsw_sp_sb_prs_ingress,
301 MLXSW_SP_SB_PRS_INGRESS_LEN);
304 return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
305 mlxsw_sp_sb_prs_egress,
306 MLXSW_SP_SB_PRS_EGRESS_LEN);
/* CM table entry: min quota in bytes, max quota (alpha encoding for
 * dynamic pools — see the comment in __mlxsw_sp_sb_cms_init()), and
 * the pool to bind to.
 */
309 #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
311 .min_buff = _min_buff, \
312 .max_buff = _max_buff, \
/* Ingress per-PG bindings: PG 0 carries traffic in pool 0, PGs 1-7
 * get dynamic minimal quotas, PG 8 does not exist, PG 9 binds to
 * the management pool (3).
 */
316 static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
317 MLXSW_SP_SB_CM(10000, 8, 0),
318 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
319 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
320 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
321 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
322 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
323 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
324 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
325 MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
326 MLXSW_SP_SB_CM(20000, 1, 3),
329 #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
/* Egress per-TC bindings: 8 unicast TCs in pool 0, then 8 empty
 * entries, and one final entry with a static 0xff maximum.
 */
331 static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
332 MLXSW_SP_SB_CM(1500, 9, 0),
333 MLXSW_SP_SB_CM(1500, 9, 0),
334 MLXSW_SP_SB_CM(1500, 9, 0),
335 MLXSW_SP_SB_CM(1500, 9, 0),
336 MLXSW_SP_SB_CM(1500, 9, 0),
337 MLXSW_SP_SB_CM(1500, 9, 0),
338 MLXSW_SP_SB_CM(1500, 9, 0),
339 MLXSW_SP_SB_CM(1500, 9, 0),
340 MLXSW_SP_SB_CM(0, 0, 0),
341 MLXSW_SP_SB_CM(0, 0, 0),
342 MLXSW_SP_SB_CM(0, 0, 0),
343 MLXSW_SP_SB_CM(0, 0, 0),
344 MLXSW_SP_SB_CM(0, 0, 0),
345 MLXSW_SP_SB_CM(0, 0, 0),
346 MLXSW_SP_SB_CM(0, 0, 0),
347 MLXSW_SP_SB_CM(0, 0, 0),
348 MLXSW_SP_SB_CM(1, 0xff, 0),
351 #define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
353 #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
/* Egress bindings for the CPU port; a handful of TCs get a
 * MLXSW_PORT_MAX_MTU minimum, the rest are zeroed.
 */
355 static const struct mlxsw_sp_cpu_port_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
356 MLXSW_SP_CPU_PORT_SB_CM,
357 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
358 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
359 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
360 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
361 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
362 MLXSW_SP_CPU_PORT_SB_CM,
363 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
364 MLXSW_SP_CPU_PORT_SB_CM,
365 MLXSW_SP_CPU_PORT_SB_CM,
366 MLXSW_SP_CPU_PORT_SB_CM,
367 MLXSW_SP_CPU_PORT_SB_CM,
368 MLXSW_SP_CPU_PORT_SB_CM,
369 MLXSW_SP_CPU_PORT_SB_CM,
370 MLXSW_SP_CPU_PORT_SB_CM,
371 MLXSW_SP_CPU_PORT_SB_CM,
372 MLXSW_SP_CPU_PORT_SB_CM,
373 MLXSW_SP_CPU_PORT_SB_CM,
374 MLXSW_SP_CPU_PORT_SB_CM,
375 MLXSW_SP_CPU_PORT_SB_CM,
376 MLXSW_SP_CPU_PORT_SB_CM,
377 MLXSW_SP_CPU_PORT_SB_CM,
378 MLXSW_SP_CPU_PORT_SB_CM,
379 MLXSW_SP_CPU_PORT_SB_CM,
380 MLXSW_SP_CPU_PORT_SB_CM,
381 MLXSW_SP_CPU_PORT_SB_CM,
382 MLXSW_SP_CPU_PORT_SB_CM,
383 MLXSW_SP_CPU_PORT_SB_CM,
384 MLXSW_SP_CPU_PORT_SB_CM,
385 MLXSW_SP_CPU_PORT_SB_CM,
386 MLXSW_SP_CPU_PORT_SB_CM,
387 MLXSW_SP_CPU_PORT_SB_CM,
390 #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
391 ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
/* Write one direction's CM table for a port: convert min quotas to
 * cells and skip ingress PG 8, which has no hardware counterpart.
 */
393 static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
394 enum mlxsw_reg_sbxx_dir dir,
395 const struct mlxsw_sp_sb_cm *cms,
401 for (i = 0; i < cms_len; i++) {
402 const struct mlxsw_sp_sb_cm *cm;
405 if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
406 continue; /* PG number 8 does not exist, skip it */
408 /* All pools are initialized using dynamic thresholds,
409 * therefore 'max_buff' isn't specified in cells.
411 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
412 err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
413 min_buff, cm->max_buff, cm->pool);
/* Program both ingress and egress CM tables for a front-panel port. */
420 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
424 err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
425 mlxsw_sp_port->local_port,
426 MLXSW_REG_SBXX_DIR_INGRESS,
427 mlxsw_sp_sb_cms_ingress,
428 MLXSW_SP_SB_CMS_INGRESS_LEN);
431 return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
432 mlxsw_sp_port->local_port,
433 MLXSW_REG_SBXX_DIR_EGRESS,
434 mlxsw_sp_sb_cms_egress,
435 MLXSW_SP_SB_CMS_EGRESS_LEN);
/* Program the CPU port (local port 0) egress CM table. */
438 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
440 return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
441 mlxsw_sp_cpu_port_sb_cms,
442 MLXSW_SP_CPU_PORT_SB_MCS_LEN);
/* PM table entry: per-{port, pool} min/max quotas. */
445 #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
447 .min_buff = _min_buff, \
448 .max_buff = _max_buff, \
/* Ingress quotas: pools 0 and 3 (data and management) get the
 * maximum dynamic threshold, pools 1-2 the minimum.
 */
451 static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
452 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
453 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
454 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
455 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
458 #define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
/* Egress quotas: pool 0 gets a fixed dynamic threshold of 7. */
460 static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
461 MLXSW_SP_SB_PM(0, 7),
462 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
463 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
464 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
467 #define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
/* Write one direction's PM table for a port. */
469 static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
470 enum mlxsw_reg_sbxx_dir dir,
471 const struct mlxsw_sp_sb_pm *pms,
477 for (i = 0; i < pms_len; i++) {
478 const struct mlxsw_sp_sb_pm *pm;
481 err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
482 pm->min_buff, pm->max_buff);
/* Program both ingress and egress PM tables for a port. */
489 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
493 err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
494 mlxsw_sp_port->local_port,
495 MLXSW_REG_SBXX_DIR_INGRESS,
496 mlxsw_sp_sb_pms_ingress,
497 MLXSW_SP_SB_PMS_INGRESS_LEN);
500 return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
501 mlxsw_sp_port->local_port,
502 MLXSW_REG_SBXX_DIR_EGRESS,
503 mlxsw_sp_sb_pms_egress,
504 MLXSW_SP_SB_PMS_EGRESS_LEN);
/* SBMM table entry — per-priority quotas written via the SBMM
 * register (presumably multicast bindings; verify against the
 * register documentation).
 */
507 struct mlxsw_sp_sb_mm {
513 #define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
515 .min_buff = _min_buff, \
516 .max_buff = _max_buff, \
/* One identical entry per switch priority: 20000 bytes minimum,
 * static 0xff maximum, pool 0.
 */
520 static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
521 MLXSW_SP_SB_MM(20000, 0xff, 0),
522 MLXSW_SP_SB_MM(20000, 0xff, 0),
523 MLXSW_SP_SB_MM(20000, 0xff, 0),
524 MLXSW_SP_SB_MM(20000, 0xff, 0),
525 MLXSW_SP_SB_MM(20000, 0xff, 0),
526 MLXSW_SP_SB_MM(20000, 0xff, 0),
527 MLXSW_SP_SB_MM(20000, 0xff, 0),
528 MLXSW_SP_SB_MM(20000, 0xff, 0),
529 MLXSW_SP_SB_MM(20000, 0xff, 0),
530 MLXSW_SP_SB_MM(20000, 0xff, 0),
531 MLXSW_SP_SB_MM(20000, 0xff, 0),
532 MLXSW_SP_SB_MM(20000, 0xff, 0),
533 MLXSW_SP_SB_MM(20000, 0xff, 0),
534 MLXSW_SP_SB_MM(20000, 0xff, 0),
535 MLXSW_SP_SB_MM(20000, 0xff, 0),
538 #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
/* Write the whole SBMM table, converting min quotas to cells. */
540 static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
542 char sbmm_pl[MLXSW_REG_SBMM_LEN];
546 for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
547 const struct mlxsw_sp_sb_mm *mc;
550 mc = &mlxsw_sp_sb_mms[i];
551 /* All pools are initialized using dynamic thresholds,
552 * therefore 'max_buff' isn't specified in cells.
554 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
555 mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
557 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
/* Top-level shared-buffer bring-up: validate the CELL_SIZE and
 * MAX_BUFFER_SIZE device resources, allocate the state, program the
 * pool/CM/MM tables, and register the buffer with devlink.  Errors
 * unwind through the goto labels in reverse order of setup.
 */
564 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
569 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
572 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
574 sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);
576 mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
579 mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
581 err = mlxsw_sp_sb_ports_init(mlxsw_sp);
583 goto err_sb_ports_init;
584 err = mlxsw_sp_sb_prs_init(mlxsw_sp);
586 goto err_sb_prs_init;
587 err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
589 goto err_sb_cpu_port_sb_cms_init;
590 err = mlxsw_sp_sb_mms_init(mlxsw_sp);
592 goto err_sb_mms_init;
/* Expose the buffer to devlink: ingress/egress pool counts, then
 * ingress/egress TC counts.
 */
593 err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
594 MLXSW_SP_SB_POOL_COUNT,
595 MLXSW_SP_SB_POOL_COUNT,
596 MLXSW_SP_SB_TC_COUNT,
597 MLXSW_SP_SB_TC_COUNT);
599 goto err_devlink_sb_register;
603 err_devlink_sb_register:
605 err_sb_cpu_port_sb_cms_init:
607 mlxsw_sp_sb_ports_fini(mlxsw_sp);
/* Tear down in reverse: unregister from devlink, then free state. */
613 void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
615 devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
616 mlxsw_sp_sb_ports_fini(mlxsw_sp);
/* Per-port buffer setup: headroom, then CM and PM tables. */
620 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
624 err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
627 err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
630 err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
/* devlink pool_index encodes direction: [0, MLXSW_SP_SB_POOL_COUNT)
 * are ingress pools, the next MLXSW_SP_SB_POOL_COUNT are egress.
 * Extract the hardware pool number.
 */
635 static u8 pool_get(u16 pool_index)
637 return pool_index % MLXSW_SP_SB_POOL_COUNT;
/* Rebuild a devlink pool_index from pool number and direction. */
640 static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
645 if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
646 pool_index += MLXSW_SP_SB_POOL_COUNT;
/* Extract the direction encoded in a devlink pool_index. */
650 static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
652 return pool_index < MLXSW_SP_SB_POOL_COUNT ?
653 MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
/* devlink op: report a pool's type, size (bytes) and threshold mode
 * from the local cache — no hardware access.
 */
656 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
657 unsigned int sb_index, u16 pool_index,
658 struct devlink_sb_pool_info *pool_info)
660 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
661 u8 pool = pool_get(pool_index);
662 enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
663 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
/* Direction and mode enums mirror the devlink ones 1:1, hence the
 * plain casts.
 */
665 pool_info->pool_type = (enum devlink_sb_pool_type) dir;
666 pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
667 pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
/* devlink op: resize/re-mode a pool, rejecting sizes above the
 * device's MAX_BUFFER_SIZE resource.
 */
671 int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
672 unsigned int sb_index, u16 pool_index, u32 size,
673 enum devlink_sb_threshold_type threshold_type)
675 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
676 u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
677 u8 pool = pool_get(pool_index);
678 enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
679 enum mlxsw_reg_sbpr_mode mode;
681 if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
684 mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
685 return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
/* Offset between the devlink threshold value and the register's
 * dynamic "alpha" encoding: devlink 3 <-> alpha 1, 16 <-> 14.
 */
688 #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
/* Convert a cached register max_buff to the devlink threshold:
 * shift by the alpha offset for dynamic pools, cells->bytes for
 * static ones.
 */
690 static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
691 enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
693 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
695 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
696 return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
697 return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
/* Inverse of the above: validate a devlink threshold and convert it
 * to a register max_buff, range-checking the alpha for dynamic
 * pools and bytes->cells for static ones.
 */
700 static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
701 enum mlxsw_reg_sbxx_dir dir, u32 threshold,
704 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
706 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
709 val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
710 if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
711 val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
715 *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
/* devlink op: report a port-pool threshold from the cached PM
 * entry, converted to devlink units.
 */
720 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
721 unsigned int sb_index, u16 pool_index,
724 struct mlxsw_sp_port *mlxsw_sp_port =
725 mlxsw_core_port_driver_priv(mlxsw_core_port);
726 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
727 u8 local_port = mlxsw_sp_port->local_port;
728 u8 pool = pool_get(pool_index);
729 enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
730 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
733 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
/* devlink op: validate and write a new port-pool threshold. */
738 int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
739 unsigned int sb_index, u16 pool_index,
742 struct mlxsw_sp_port *mlxsw_sp_port =
743 mlxsw_core_port_driver_priv(mlxsw_core_port);
744 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
745 u8 local_port = mlxsw_sp_port->local_port;
746 u8 pool = pool_get(pool_index);
747 enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
751 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
752 threshold, &max_buff);
756 return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
/* devlink op: report a TC's pool binding and threshold from the
 * cached CM entry.  pool_type maps 1:1 onto the register direction.
 */
760 int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
761 unsigned int sb_index, u16 tc_index,
762 enum devlink_sb_pool_type pool_type,
763 u16 *p_pool_index, u32 *p_threshold)
765 struct mlxsw_sp_port *mlxsw_sp_port =
766 mlxsw_core_port_driver_priv(mlxsw_core_port);
767 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
768 u8 local_port = mlxsw_sp_port->local_port;
769 u8 pg_buff = tc_index;
770 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
771 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
774 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
776 *p_pool_index = pool_index_get(cm->pool, dir);
/* devlink op: rebind a TC to a pool with a new threshold.  The
 * requested pool's encoded direction must match pool_type.
 */
780 int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
781 unsigned int sb_index, u16 tc_index,
782 enum devlink_sb_pool_type pool_type,
783 u16 pool_index, u32 threshold)
785 struct mlxsw_sp_port *mlxsw_sp_port =
786 mlxsw_core_port_driver_priv(mlxsw_core_port);
787 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
788 u8 local_port = mlxsw_sp_port->local_port;
789 u8 pg_buff = tc_index;
790 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
791 u8 pool = pool_get(pool_index);
/* Cross-direction bindings are not allowed. */
795 if (dir != dir_get(pool_index))
798 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
799 threshold, &max_buff);
803 return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
/* Max ports per SBSR transaction: each port contributes an ingress
 * and an egress record per TC, bounded by the register record count.
 */
807 #define MASKED_COUNT_MAX \
808 (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
/* Context passed through the SBSR transaction: the first port of
 * the batch and how many ports were masked into it.
 */
810 struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
/* SBSR completion callback: walk the same ports the query masked
 * (starting at local_port_1, skipping absent ports) and unpack the
 * per-TC occupancy records — all ingress records first, then all
 * egress — into the CM cache.
 */
815 static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
816 char *sbsr_pl, size_t sbsr_pl_len,
817 unsigned long cb_priv)
819 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
820 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
824 struct mlxsw_sp_sb_cm *cm;
827 memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
830 for (local_port = cb_ctx.local_port_1;
831 local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
832 if (!mlxsw_sp->ports[local_port])
834 for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
835 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
836 MLXSW_REG_SBXX_DIR_INGRESS);
837 mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
838 &cm->occ.cur, &cm->occ.max);
840 if (++masked_count == cb_ctx.masked_count)
844 for (local_port = cb_ctx.local_port_1;
845 local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
846 if (!mlxsw_sp->ports[local_port])
848 for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
849 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
850 MLXSW_REG_SBXX_DIR_EGRESS);
851 mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
852 &cm->occ.cur, &cm->occ.max);
854 if (++masked_count == cb_ctx.masked_count)
/* devlink op: snapshot shared-buffer occupancy.  Builds SBSR queries
 * covering up to MASKED_COUNT_MAX present ports per transaction
 * (per-TC occupancy, unpacked by mlxsw_sp_sb_sr_occ_query_cb()) and
 * queues SBPM per-pool queries alongside, then waits for the bulk
 * list.  NOTE(review): the batching labels/gotos for ports beyond
 * one SBSR are outside this excerpt.
 */
859 int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
860 unsigned int sb_index)
862 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
863 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
864 unsigned long cb_priv;
865 LIST_HEAD(bulk_list);
874 sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
880 local_port_1 = local_port;
/* 'false' = read-only query, do not clear watermarks. */
882 mlxsw_reg_sbsr_pack(sbsr_pl, false);
883 for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
884 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
885 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
887 for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
888 if (!mlxsw_sp->ports[local_port])
890 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
891 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
892 for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
893 err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
894 MLXSW_REG_SBXX_DIR_INGRESS,
898 err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
899 MLXSW_REG_SBXX_DIR_EGRESS,
904 if (++masked_count == MASKED_COUNT_MAX)
/* Hand the batch bounds to the completion callback via cb_priv. */
909 cb_ctx.masked_count = masked_count;
910 cb_ctx.local_port_1 = local_port_1;
911 memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
912 err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
913 &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
/* More ports remain -> start another SBSR batch. */
917 if (local_port < mlxsw_core_max_ports(mlxsw_core))
921 err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
/* devlink op: clear max-occupancy watermarks.  Same batching scheme
 * as mlxsw_sp_sb_occ_snapshot(), but the SBSR is packed with the
 * clear bit set and needs no completion callback, and SBPM clears
 * replace the queries.
 */
928 int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
929 unsigned int sb_index)
931 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
932 LIST_HEAD(bulk_list);
934 unsigned int masked_count;
940 sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
/* 'true' = clear watermarks as part of the access. */
947 mlxsw_reg_sbsr_pack(sbsr_pl, true);
948 for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
949 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
950 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
952 for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
953 if (!mlxsw_sp->ports[local_port])
955 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
956 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
957 for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
958 err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
959 MLXSW_REG_SBXX_DIR_INGRESS,
963 err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
964 MLXSW_REG_SBXX_DIR_EGRESS,
969 if (++masked_count == MASKED_COUNT_MAX)
974 err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
975 &bulk_list, NULL, 0);
/* More ports remain -> start another SBSR batch. */
978 if (local_port < mlxsw_core_max_ports(mlxsw_core))
982 err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
/* devlink op: report a port-pool's snapshotted occupancy (current
 * and max) in bytes, from the cache filled by the snapshot path.
 */
989 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
990 unsigned int sb_index, u16 pool_index,
991 u32 *p_cur, u32 *p_max)
993 struct mlxsw_sp_port *mlxsw_sp_port =
994 mlxsw_core_port_driver_priv(mlxsw_core_port);
995 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
996 u8 local_port = mlxsw_sp_port->local_port;
997 u8 pool = pool_get(pool_index);
998 enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
999 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1002 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1003 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1007 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1008 unsigned int sb_index, u16 tc_index,
1009 enum devlink_sb_pool_type pool_type,
1010 u32 *p_cur, u32 *p_max)
1012 struct mlxsw_sp_port *mlxsw_sp_port =
1013 mlxsw_core_port_driver_priv(mlxsw_core_port);
1014 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1015 u8 local_port = mlxsw_sp_port->local_port;
1016 u8 pg_buff = tc_index;
1017 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1018 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1021 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1022 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);