/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>

#include "port.h"
#include "core.h"

#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */

#define MLXSW_SP_RFID_BASE 15360
#define MLXSW_SP_INVALID_RIF 0xffff
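
/* Rough layout of the FID number space, as implied by the constants above
 * and the conversion helpers below: FIDs below MLXSW_SP_VFID_BASE
 * (VLAN_N_VID) back the VLANs of the VLAN-aware bridge, FIDs in
 * [MLXSW_SP_VFID_BASE, MLXSW_SP_RFID_BASE) are vFIDs used for VLAN-unaware
 * bridges, and FIDs from MLXSW_SP_RFID_BASE up are rFIDs representing
 * router interfaces.
 */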

#define MLXSW_SP_MID_MAX 7000

#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4

#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)

#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */

#define MLXSW_SP_BYTES_PER_CELL 96 /* shared buffer allocation unit */

#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP((b), MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_CELLS_TO_BYTES(c) ((c) * MLXSW_SP_BYTES_PER_CELL)

#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128

/* Maximum delay buffer needed in case of PAUSE frames, in cells.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 612

#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */

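/* Convert a PFC delay allowance into the extra headroom, in cells, needed on
 * top of the MTU. The delay is taken to be in bit times, hence the division
 * by BITS_PER_BYTE below. Illustrative worked example (not from the original
 * source): delay = 32768 bit times -> 4096 bytes -> 43 cells, mtu = 1518
 * -> 16 cells, result = 2 * 43 + 16 = 102 cells.
 */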
static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
{
        delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
        return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
}

struct mlxsw_sp_port;

struct mlxsw_sp_upper {
        struct net_device *dev;
        unsigned int ref_count;
};

struct mlxsw_sp_fid {
        void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
        struct list_head list;
        unsigned int ref_count;
        struct net_device *dev;
        struct mlxsw_sp_rif *r;
        u16 fid;
};

struct mlxsw_sp_rif {
        struct net_device *dev;
        unsigned int ref_count;
        struct mlxsw_sp_fid *f;
        unsigned char addr[ETH_ALEN];
        int mtu;
        u16 rif;
};

struct mlxsw_sp_mid {
        struct list_head list;
        unsigned char addr[ETH_ALEN];
        u16 fid;
        u16 mid;
        unsigned int ref_count;
};

static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
{
        return MLXSW_SP_VFID_BASE + vfid;
}

static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
{
        return fid - MLXSW_SP_VFID_BASE;
}

static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
        return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}

static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
{
        return fid >= MLXSW_SP_RFID_BASE;
}

static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
        return MLXSW_SP_RFID_BASE + rif;
}

struct mlxsw_sp_sb_pr {
        enum mlxsw_reg_sbpr_mode mode;
        u32 size;
};

struct mlxsw_cp_sb_occ {
        u32 cur;
        u32 max;
};

struct mlxsw_sp_sb_cm {
        u32 min_buff;
        u32 max_buff;
        u8 pool;
        struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_pm {
        u32 min_buff;
        u32 max_buff;
        struct mlxsw_cp_sb_occ occ;
};

#define MLXSW_SP_SB_POOL_COUNT 4
#define MLXSW_SP_SB_TC_COUNT 8

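/* Per-ASIC shared buffer state. The leading [2] dimension in each array
 * appears to index the buffer direction (ingress vs. egress), matching the
 * two directions of the shared buffer registers.
 */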
struct mlxsw_sp_sb {
        struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
        struct {
                struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
                struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
        } ports[MLXSW_PORT_MAX_PORTS];
};

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
        DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

enum mlxsw_sp_l3proto {
        MLXSW_SP_L3_PROTO_IPV4,
        MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
        u8 id; /* tree ID */
        unsigned int ref_count;
        enum mlxsw_sp_l3proto proto;
        struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib;

struct mlxsw_sp_vr {
        u16 id; /* virtual router ID */
        bool used;
        enum mlxsw_sp_l3proto proto;
        u32 tb_id; /* kernel fib table id */
        struct mlxsw_sp_lpm_tree *lpm_tree;
        struct mlxsw_sp_fib *fib;
};

enum mlxsw_sp_span_type {
        MLXSW_SP_SPAN_EGRESS,
        MLXSW_SP_SPAN_INGRESS
};

struct mlxsw_sp_span_inspected_port {
        struct list_head list;
        enum mlxsw_sp_span_type type;
        u8 local_port;
};

struct mlxsw_sp_span_entry {
        u8 local_port;
        bool used;
        struct list_head bound_ports_list;
        int ref_count;
        int id;
};

enum mlxsw_sp_port_mall_action_type {
        MLXSW_SP_PORT_MALL_MIRROR,
};

struct mlxsw_sp_port_mall_mirror_tc_entry {
        u8 to_local_port;
        bool ingress;
};

struct mlxsw_sp_port_mall_tc_entry {
        struct list_head list;
        unsigned long cookie;
        enum mlxsw_sp_port_mall_action_type type;
        union {
                struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
        };
};

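/* Per-ASIC routing state: the pool of hardware LPM trees shared by the
 * virtual routers, the virtual router table itself, and the work items that
 * keep the hardware neighbour and nexthop tables in sync with the kernel.
 */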
struct mlxsw_sp_router {
        struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
        struct mlxsw_sp_vr *vrs;
        struct rhashtable neigh_ht;
        struct {
                struct delayed_work dw;
                unsigned long interval; /* ms */
        } neighs_update;
        struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
        struct list_head nexthop_group_list;
        struct list_head nexthop_neighs_list;
        bool aborted;
};

struct mlxsw_sp {
        struct {
                struct list_head list;
                DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
        } vfids;
        struct {
                struct list_head list;
                DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
        } br_mids;
        struct list_head fids; /* VLAN-aware bridge FIDs */
        struct mlxsw_sp_rif **rifs;
        struct mlxsw_sp_port **ports;
        struct mlxsw_core *core;
        const struct mlxsw_bus_info *bus_info;
        unsigned char base_mac[ETH_ALEN];
        struct {
                struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
                unsigned int interval; /* ms */
        } fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
        u32 ageing_time;
        struct mlxsw_sp_upper master_bridge;
        struct mlxsw_sp_upper *lags;
        u8 port_to_module[MLXSW_PORT_MAX_PORTS];
        struct mlxsw_sp_sb sb;
        struct mlxsw_sp_router router;
        struct {
                DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
        } kvdl;

        struct {
                struct mlxsw_sp_span_entry *entries;
                int entries_count;
        } span;
        struct notifier_block fib_nb;
};

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
        return &mlxsw_sp->lags[lag_id];
}

struct mlxsw_sp_port_pcpu_stats {
        u64 rx_packets;
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
        struct u64_stats_sync syncp;
        u32 tx_dropped;
};

struct mlxsw_sp_port {
        struct net_device *dev;
        struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
        struct mlxsw_sp *mlxsw_sp;
        u8 local_port;
        u8 stp_state;
        u8 learning:1,
           learning_sync:1,
           uc_flood:1,
           bridged:1,
           lagged:1,
           split:1;
        u16 pvid;
        u16 lag_id;
        struct {
                struct list_head list;
                struct mlxsw_sp_fid *f;
                u16 vid;
        } vport;
        struct {
                u8 tx_pause:1,
                   rx_pause:1,
                   autoneg:1;
        } link;
        struct {
                struct ieee_ets *ets;
                struct ieee_maxrate *maxrate;
                struct ieee_pfc *pfc;
        } dcb;
        struct {
                u8 module;
                u8 width;
                u8 lane;
        } mapping;
        /* 802.1Q bridge VLANs */
        unsigned long *active_vlans;
        unsigned long *untagged_vlans;
        /* VLAN interfaces */
        struct list_head vports_list;
        /* TC handles */
        struct list_head mall_tc_list;
        struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
                struct rtnl_link_stats64 *cache;
                struct delayed_work update_dw;
        } hw_stats;
};

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);

static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
        return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
        struct mlxsw_sp_port *mlxsw_sp_port;
        u8 local_port;

        local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
                                                lag_id, port_index);
        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}

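/* A vPort is a child mlxsw_sp_port created for a VLAN upper device of a
 * physical port, identified by a non-zero VID (see mlxsw_sp_port_is_vport()
 * below). The helpers that follow look vPorts up on the parent port's
 * vports_list by VID or by FID.
 */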
static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
        return mlxsw_sp_vport->vport.vid;
}

static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
        u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

        return vid != 0;
}

static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
                                          struct mlxsw_sp_fid *f)
{
        mlxsw_sp_vport->vport.f = f;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
        return mlxsw_sp_vport->vport.f;
}

static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
        struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

        return f ? f->dev : NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_vport;

        list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
                            vport.list) {
                if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
                        return mlxsw_sp_vport;
        }

        return NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
                                u16 fid)
{
        struct mlxsw_sp_port *mlxsw_sp_vport;

        list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
                            vport.list) {
                struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

                if (f && f->fid == fid)
                        return mlxsw_sp_vport;
        }

        return NULL;
}

static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
                                                     u16 fid)
{
        struct mlxsw_sp_fid *f;

        list_for_each_entry(f, &mlxsw_sp->fids, list)
                if (f->fid == fid)
                        return f;

        return NULL;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
                   const struct net_device *br_dev)
{
        struct mlxsw_sp_fid *f;

        list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
                if (f->dev == br_dev)
                        return f;

        return NULL;
}

static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
                         const struct net_device *dev)
{
        int i;

        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
                if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
                        return mlxsw_sp->rifs[i];

        return NULL;
}

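/* Layer-2 flood tables: UC for unknown unicast traffic and BM, presumably,
 * for broadcast/multicast traffic.
 */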
enum mlxsw_sp_flood_table {
        MLXSW_SP_FLOOD_TABLE_UC,
        MLXSW_SP_FLOOD_TABLE_BM,
};

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
                         unsigned int sb_index, u16 pool_index,
                         struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
                         unsigned int sb_index, u16 pool_index, u32 size,
                         enum devlink_sb_threshold_type threshold_type);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
                              unsigned int sb_index, u16 pool_index,
                              u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
                              unsigned int sb_index, u16 pool_index,
                              u32 threshold);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
                                 unsigned int sb_index, u16 tc_index,
                                 enum devlink_sb_pool_type pool_type,
                                 u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
                                 unsigned int sb_index, u16 tc_index,
                                 enum devlink_sb_pool_type pool_type,
                                 u16 pool_index, u32 threshold);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
                             unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
                              unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
                                  unsigned int sb_index, u16 pool_index,
                                  u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
                                     unsigned int sb_index, u16 tc_index,
                                     enum devlink_sb_pool_type pool_type,
                                     u32 *p_cur, u32 *p_max);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
                                 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
                           u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
                             bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
                        bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
                          enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
                          bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
                              u8 switch_prio, u8 tclass);
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
                                 u8 *prio_tc, bool pause_en,
                                 struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                  enum mlxsw_reg_qeec_hr hr, u8 index,
                                  u8 next_index, u32 maxrate);
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 vid_begin, u16 vid_end,
                                     bool learn_enable);

#ifdef CONFIG_MLXSW_SPECTRUM_DCB

int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);

#else

static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        return 0;
}

static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}

#endif

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_neigh_construct(struct net_device *dev,
                                    struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
                                   struct neighbour *n);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
                                   unsigned long event, void *ptr);

int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);

#endif