1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9
10 #include "spectrum.h"
11 #include "core.h"
12 #include "port.h"
13 #include "reg.h"
14
/* Host-side cache of a shared buffer pool's configuration (SBPR register). */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;	/* static or dynamic thresholding */
	u32 size;			/* pool size, in cells */
};
19
/* Buffer occupancy snapshot, in cells.
 * NOTE(review): "cp" in the tag looks like a typo for "sp"; kept as-is
 * since the name is referenced by other structures in this file.
 */
struct mlxsw_cp_sb_occ {
	u32 cur;	/* current occupancy */
	u32 max;	/* maximum (watermark) occupancy */
};
24
/* Host-side cache of a per-{port, PG/TC} binding (SBCM register). */
struct mlxsw_sp_sb_cm {
	u32 min_buff;	/* guaranteed quota, in cells */
	u32 max_buff;	/* max quota: cells, or alpha index when dynamic */
	u8 pool;	/* index of the pool this PG/TC is bound to */
	struct mlxsw_cp_sb_occ occ;	/* last sampled occupancy */
};
31
/* Host-side cache of a per-{port, pool} quota (SBPM register). */
struct mlxsw_sp_sb_pm {
	u32 min_buff;	/* guaranteed quota, in cells */
	u32 max_buff;	/* max quota: cells, or alpha index when dynamic */
	struct mlxsw_cp_sb_occ occ;	/* last sampled occupancy */
};
37
38 #define MLXSW_SP_SB_POOL_COUNT 4
39 #define MLXSW_SP_SB_TC_COUNT 8
40
/* Per-port shared buffer state; first index is the direction
 * (MLXSW_REG_SBXX_DIR_INGRESS / _EGRESS), hence the [2].
 */
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
	struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
};
45
/* Top-level shared buffer state; allocated in mlxsw_sp_buffers_init(). */
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];	/* [dir][pool] */
	struct mlxsw_sp_sb_port *ports;	/* array sized by mlxsw_core_max_ports() */
	u32 cell_size;			/* device buffer cell size, in bytes */
};
51
/* Convert a cell count to bytes using the device's buffer cell size. */
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
	u32 cell_size = mlxsw_sp->sb->cell_size;

	return cells * cell_size;
}
56
/* Convert a byte count to cells, rounding up to a whole cell. */
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	u32 cell_size = mlxsw_sp->sb->cell_size;

	return (bytes + cell_size - 1) / cell_size;
}
61
/* Look up the cached pool entry for the given direction and pool index. */
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	struct mlxsw_sp_sb *sb = mlxsw_sp->sb;

	return &sb->prs[dir][pool];
}
68
/* Look up the cached {port, PG/TC, direction} binding entry. */
static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
	struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];

	return &sb_port->cms[dir][pg_buff];
}
75
/* Look up the cached {port, pool, direction} quota entry. */
static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];

	return &sb_port->pms[dir][pool];
}
82
/* Program a pool via the SBPR register, then mirror the new values in
 * the host-side cache.  The cache is only updated on a successful write.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
				enum mlxsw_reg_sbxx_dir dir,
				enum mlxsw_reg_sbpr_mode mode, u32 size)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	pr->mode = mode;
	pr->size = size;
	return 0;
}
101
/* Program a {port, PG/TC} binding via the SBCM register and update the
 * cache.  pg_buff values >= MLXSW_SP_SB_TC_COUNT (e.g. the ingress
 * management buffer 9) are written to hardware but not cached.
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff, u8 pool)
{
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
			    min_buff, max_buff, pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (pg_buff >= MLXSW_SP_SB_TC_COUNT)
		return 0;

	cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
	cm->min_buff = min_buff;
	cm->max_buff = max_buff;
	cm->pool = pool;
	return 0;
}
124
/* Program a {port, pool} quota via the SBPM register, then mirror the
 * new values in the cache.  The cache is only updated on success.
 */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pool, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff)
{
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
144
/* Queue an SBPM query that clears the max-occupancy watermark for one
 * {port, pool, direction}; completion is gathered via bulk_list.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char pl[MLXSW_REG_SBPM_LEN];

	/* true here (vs. false in the plain query) — presumably the
	 * clear-watermark flag; confirm against the SBPM description.
	 */
	mlxsw_reg_sbpm_pack(pl, local_port, pool, dir, true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), pl,
				     bulk_list, NULL, 0);
}
155
/* Completion callback for an SBPM occupancy query: unpack the current
 * and max occupancy into the cache entry carried through cb_priv.
 */
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm;

	pm = (struct mlxsw_sp_sb_pm *) cb_priv;
	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
164
/* Queue an SBPM occupancy query for one {port, pool, direction}; the
 * result lands in the cache entry via mlxsw_sp_sb_pm_occ_query_cb().
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);
	char pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(pl, local_port, pool, dir, false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), pl,
				     bulk_list, mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
179
/* Default headroom buffer sizes per priority group, in bytes; indices
 * without an initializer are implicitly 0.  Index 9 is presumably the
 * control-traffic buffer (it is sized by the maximum MTU) — confirm
 * against the PBMC register documentation.
 */
static const u16 mlxsw_sp_pbs[] = {
	[0] = 2 * ETH_FRAME_LEN,
	[9] = 2 * MLXSW_PORT_MAX_MTU,
};
184
185 #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
186 #define MLXSW_SP_PB_UNUSED 8
187
mlxsw_sp_port_pb_init(struct mlxsw_sp_port * mlxsw_sp_port)188 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
189 {
190 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
191 char pbmc_pl[MLXSW_REG_PBMC_LEN];
192 int i;
193
194 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
195 0xffff, 0xffff / 2);
196 for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
197 u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);
198
199 if (i == MLXSW_SP_PB_UNUSED)
200 continue;
201 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
202 }
203 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
204 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
205 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
206 }
207
mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port * mlxsw_sp_port)208 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
209 {
210 char pptb_pl[MLXSW_REG_PPTB_LEN];
211 int i;
212
213 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
214 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
215 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
216 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
217 pptb_pl);
218 }
219
/* Initialize the port's headroom: buffer sizes first, then the
 * priority-to-buffer mapping.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err = mlxsw_sp_port_pb_init(mlxsw_sp_port);

	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
229
mlxsw_sp_sb_ports_init(struct mlxsw_sp * mlxsw_sp)230 static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
231 {
232 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
233
234 mlxsw_sp->sb->ports = kcalloc(max_ports,
235 sizeof(struct mlxsw_sp_sb_port),
236 GFP_KERNEL);
237 if (!mlxsw_sp->sb->ports)
238 return -ENOMEM;
239 return 0;
240 }
241
/* Free the per-port shared buffer state array. */
static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->sb->ports);
}
246
247 #define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000
248 #define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
249 #define MLXSW_SP_SB_PR_EGRESS_SIZE 13232000
250
251 #define MLXSW_SP_SB_PR(_mode, _size) \
252 { \
253 .mode = _mode, \
254 .size = _size, \
255 }
256
/* Default ingress pools: pool 0 is the main dynamic pool, pools 1-2 are
 * unused (size 0), pool 3 is the management pool.  Sizes are in bytes
 * and converted to cells in __mlxsw_sp_sb_prs_init().
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
};
265
266 #define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
267
/* Default egress pools: only pool 0 is sized; pools 1-3 are unused. */
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
};
274
275 #define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
276
/* Program all pools of one direction from a defaults table, converting
 * byte sizes to cells.  Stops at the first failed register write.
 */
static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_pr *prs,
				  size_t prs_len)
{
	size_t i;

	for (i = 0; i < prs_len; i++) {
		u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);
		int err;

		err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode,
					   size);
		if (err)
			return err;
	}
	return 0;
}
294
mlxsw_sp_sb_prs_init(struct mlxsw_sp * mlxsw_sp)295 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
296 {
297 int err;
298
299 err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
300 mlxsw_sp_sb_prs_ingress,
301 MLXSW_SP_SB_PRS_INGRESS_LEN);
302 if (err)
303 return err;
304 return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
305 mlxsw_sp_sb_prs_egress,
306 MLXSW_SP_SB_PRS_EGRESS_LEN);
307 }
308
309 #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
310 { \
311 .min_buff = _min_buff, \
312 .max_buff = _max_buff, \
313 .pool = _pool, \
314 }
315
/* Default ingress PG bindings: PG 0 gets a byte-sized guarantee on pool
 * 0, PGs 1-7 are dynamic-only, entry 8 is a placeholder (PG 8 does not
 * exist and is skipped in __mlxsw_sp_sb_cms_init()), and entry 9 binds
 * the management buffer to pool 3.  min_buff values are bytes here and
 * converted to cells at init time.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(10000, 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};
328
329 #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
330
/* Default egress TC bindings: entries 0-7 are the regular TCs on pool 0;
 * entries 8-15 use pool 15 — presumably the multicast TCs / a special
 * pool designator, confirm against the SBCM documentation; entry 16 is
 * a final catch-all with a tiny quota.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(0, 140000, 15),
	MLXSW_SP_SB_CM(0, 140000, 15),
	MLXSW_SP_SB_CM(0, 140000, 15),
	MLXSW_SP_SB_CM(0, 140000, 15),
	MLXSW_SP_SB_CM(0, 140000, 15),
	MLXSW_SP_SB_CM(0, 140000, 15),
	MLXSW_SP_SB_CM(0, 140000, 15),
	MLXSW_SP_SB_CM(0, 140000, 15),
	MLXSW_SP_SB_CM(1, 0xff, 0),
};
350
351 #define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
352
353 #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
354
/* Egress TC bindings for the CPU port.  TCs 1-5 and 7 get an MTU-sized
 * guarantee; all other entries are zeroed (no guarantee, no quota).
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};
389
390 #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
391 ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
392
/* Program all PG/TC bindings of one direction for a port from a
 * defaults table.  Ingress PG 8 does not exist in hardware and is
 * skipped; the defaults tables carry a dummy entry at that index so
 * table position still matches the PG number.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
					   min_buff, cm->max_buff, cm->pool);
		if (err)
			return err;
	}
	return 0;
}
419
mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port * mlxsw_sp_port)420 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
421 {
422 int err;
423
424 err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
425 mlxsw_sp_port->local_port,
426 MLXSW_REG_SBXX_DIR_INGRESS,
427 mlxsw_sp_sb_cms_ingress,
428 MLXSW_SP_SB_CMS_INGRESS_LEN);
429 if (err)
430 return err;
431 return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
432 mlxsw_sp_port->local_port,
433 MLXSW_REG_SBXX_DIR_EGRESS,
434 mlxsw_sp_sb_cms_egress,
435 MLXSW_SP_SB_CMS_EGRESS_LEN);
436 }
437
mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp * mlxsw_sp)438 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
439 {
440 return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
441 mlxsw_sp_cpu_port_sb_cms,
442 MLXSW_SP_CPU_PORT_SB_MCS_LEN);
443 }
444
445 #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
446 { \
447 .min_buff = _min_buff, \
448 .max_buff = _max_buff, \
449 }
450
/* Default per-port ingress pool quotas: pools 0 and 3 get the maximum
 * dynamic threshold, unused pools 1-2 the minimum.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
457
458 #define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
459
/* Default per-port egress pool quotas: pool 0 gets a dynamic threshold
 * (alpha index 7), unused pools 1-3 the minimum.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
466
467 #define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
468
/* Program all per-port pool quotas of one direction from a defaults
 * table.  Stops at the first failed register write.
 */
static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				       enum mlxsw_reg_sbxx_dir dir,
				       const struct mlxsw_sp_sb_pm *pms,
				       size_t pms_len)
{
	size_t i;

	for (i = 0; i < pms_len; i++) {
		const struct mlxsw_sp_sb_pm *pm = &pms[i];
		int err;

		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
					   pm->min_buff, pm->max_buff);
		if (err)
			return err;
	}
	return 0;
}
488
mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port * mlxsw_sp_port)489 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
490 {
491 int err;
492
493 err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
494 mlxsw_sp_port->local_port,
495 MLXSW_REG_SBXX_DIR_INGRESS,
496 mlxsw_sp_sb_pms_ingress,
497 MLXSW_SP_SB_PMS_INGRESS_LEN);
498 if (err)
499 return err;
500 return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
501 mlxsw_sp_port->local_port,
502 MLXSW_REG_SBXX_DIR_EGRESS,
503 mlxsw_sp_sb_pms_egress,
504 MLXSW_SP_SB_PMS_EGRESS_LEN);
505 }
506
/* One entry of the SBMM defaults table (see mlxsw_sp_sb_mms_init()). */
struct mlxsw_sp_sb_mm {
	u32 min_buff;	/* guaranteed quota, in bytes (converted to cells) */
	u32 max_buff;	/* max quota; alpha index since pools are dynamic */
	u8 pool;	/* pool the entry is bound to */
};
512
513 #define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
514 { \
515 .min_buff = _min_buff, \
516 .max_buff = _max_buff, \
517 .pool = _pool, \
518 }
519
/* Defaults written via the SBMM register, one entry per switch priority
 * — presumably the multicast buffer configuration; confirm against the
 * SBMM register documentation.  All entries are identical: 20000 bytes
 * guaranteed on pool 0 with max_buff 0xff.
 */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
};
537
538 #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
539
/* Program all SBMM entries from the defaults table, converting the
 * byte-sized guarantees to cells.  Stops at the first failed write.
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp_sb_mms[i];
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    mc->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
563
/* Initialize the shared buffer subsystem: allocate host-side state,
 * program pool/binding/quota defaults and register the buffer with
 * devlink.  Returns 0 or a negative errno; on failure everything
 * allocated so far is unwound via the goto ladder.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 sb_size;
	int err;

	/* Both resources must be reported by the device; without them
	 * cell conversion and devlink registration are impossible.
	 */
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;
	sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
				  MLXSW_SP_SB_POOL_COUNT,
				  MLXSW_SP_SB_POOL_COUNT,
				  MLXSW_SP_SB_TC_COUNT,
				  MLXSW_SP_SB_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

	/* Register writes need no undo; only allocations are released. */
err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}
612
/* Tear down the shared buffer subsystem in reverse order of init:
 * unregister from devlink before freeing the state it may reference.
 */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}
619
/* Per-port buffer initialization: headroom, then PG/TC bindings, then
 * pool quotas.  Returns on the first failure.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}
634
pool_get(u16 pool_index)635 static u8 pool_get(u16 pool_index)
636 {
637 return pool_index % MLXSW_SP_SB_POOL_COUNT;
638 }
639
pool_index_get(u8 pool,enum mlxsw_reg_sbxx_dir dir)640 static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
641 {
642 u16 pool_index;
643
644 pool_index = pool;
645 if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
646 pool_index += MLXSW_SP_SB_POOL_COUNT;
647 return pool_index;
648 }
649
/* Recover the direction encoded in a flat devlink pool index. */
static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
{
	if (pool_index < MLXSW_SP_SB_POOL_COUNT)
		return MLXSW_REG_SBXX_DIR_INGRESS;
	return MLXSW_REG_SBXX_DIR_EGRESS;
}
655
/* devlink op: report a pool's type, size (bytes) and threshold type
 * from the host-side cache.
 */
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u8 pool = pool_get(pool_index);
	struct mlxsw_sp_sb_pr *pr;

	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	return 0;
}
670
/* devlink op: resize/reconfigure a pool.  The byte size is validated
 * against the device's maximum buffer size before being converted to
 * cells and written out.
 */
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	enum mlxsw_reg_sbpr_mode mode =
		(enum mlxsw_reg_sbpr_mode) threshold_type;
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u8 pool = pool_get(pool_index);

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EINVAL;

	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode,
				    mlxsw_sp_bytes_cells(mlxsw_sp, size));
}
687
688 #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
689
/* Translate a cached max_buff into the devlink threshold value: for
 * dynamic pools shift the alpha index into user range, for static
 * pools convert cells to bytes.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
				     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode != MLXSW_REG_SBPR_MODE_DYNAMIC)
		return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
	return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
}
699
/* Translate a devlink threshold into a max_buff value: for dynamic
 * pools shift and range-check the alpha index, for static pools
 * convert bytes to cells.  Returns 0, or -EINVAL for an out-of-range
 * dynamic threshold.
 */
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
				    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
				    u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
	int val;

	if (pr->mode != MLXSW_REG_SBPR_MODE_DYNAMIC) {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
		return 0;
	}

	val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
	    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
		return -EINVAL;
	*p_max_buff = val;
	return 0;
}
719
/* devlink op: report a port's per-pool threshold from the cache. */
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u8 pool = pool_get(pool_index);
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, mlxsw_sp_port->local_port, pool,
				dir);
	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
						 pm->max_buff);
	return 0;
}
737
/* devlink op: set a port's per-pool threshold.  The threshold is
 * validated/translated first; min_buff stays 0.
 */
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u8 pool = pool_get(pool_index);
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, threshold,
				       &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
				    pool, dir, 0, max_buff);
}
759
/* devlink op: report the pool binding and threshold of one port TC. */
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm;

	/* tc_index doubles as the PG/TC buffer number. */
	cm = mlxsw_sp_sb_cm_get(mlxsw_sp, mlxsw_sp_port->local_port,
				tc_index, dir);
	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
						 cm->max_buff);
	*p_pool_index = pool_index_get(cm->pool, dir);
	return 0;
}
779
/* devlink op: bind one port TC to a pool with the given threshold.
 * The pool must belong to the same direction as the TC; min_buff
 * stays 0.
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u8 pool = pool_get(pool_index);
	u32 max_buff;
	int err;

	if (dir != dir_get(pool_index))
		return -EINVAL;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, threshold,
				       &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, mlxsw_sp_port->local_port,
				    tc_index, dir, 0, max_buff, pool);
}
806
807 #define MASKED_COUNT_MAX \
808 (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
809
/* Per-batch context for the SBSR occupancy query; packed by value into
 * the unsigned long cb_priv, so it must stay sizeof(unsigned long) or
 * smaller.
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;	/* number of ports covered by the batch */
	u8 local_port_1;	/* first local port of the batch */
};
814
/* Completion callback for a batched SBSR occupancy query.  Records are
 * consumed in order via the shared rec_index: first one record per
 * (port, TC) for ingress, then the same sequence for egress —
 * presumably matching the record layout the device produces for the
 * mask programmed in mlxsw_sp_sb_occ_snapshot(); confirm against the
 * SBSR documentation.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	/* The context was packed by value into the unsigned long. */
	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		/* Stop after the ports covered by this batch. */
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
858
/* devlink op: snapshot shared buffer occupancy for all ports.  Ports
 * are processed in batches of at most MASKED_COUNT_MAX (the SBSR
 * record limit); for every port in a batch, per-pool SBPM queries are
 * queued as well, and one SBSR query collects the per-TC occupancy.
 * All queued transactions are awaited at the end via bulk_list.
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	local_port_1 = local_port;	/* first port of this batch */
	masked_count = 0;
	/* false: read occupancy without clearing the max watermarks */
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		/* The SBSR response holds only MASKED_COUNT_MAX ports
		 * worth of records; flush the batch when full.
		 */
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	/* The context rides inside the unsigned long cb_priv by value. */
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	/* Always drain the bulk list, even on error. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
927
/* devlink op: clear the max-occupancy watermarks for all ports.  Same
 * batching scheme as mlxsw_sp_sb_occ_snapshot(), but the SBSR/SBPM
 * requests are issued with the clear flag set and no unpack callback
 * is needed.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	masked_count = 0;
	/* true: clear the max watermarks as part of the read */
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		/* Flush the batch once the SBSR record limit is hit. */
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	/* Always drain the bulk list, even on error. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
988
/* devlink op: report a port's per-pool occupancy (current and max), in
 * bytes, from the values cached by the last snapshot.
 */
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, mlxsw_sp_port->local_port,
				pool_get(pool_index), dir_get(pool_index));
	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
	return 0;
}
1006
/* devlink op: report a port TC's occupancy (current and max), in bytes,
 * from the values cached by the last snapshot.
 */
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm;

	cm = mlxsw_sp_sb_cm_get(mlxsw_sp, mlxsw_sp_port->local_port,
				tc_index, dir);
	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
	return 0;
}
1025