/*
 * drivers/net/ethernet/mellanox/mlxsw/mlxsw_span.c
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
33 | ||
34 | #include <linux/list.h> | |
35 | ||
36 | #include "spectrum.h" | |
37 | #include "spectrum_span.h" | |
38 | ||
39 | int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) | |
40 | { | |
41 | int i; | |
42 | ||
43 | if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN)) | |
44 | return -EIO; | |
45 | ||
46 | mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, | |
47 | MAX_SPAN); | |
48 | mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count, | |
49 | sizeof(struct mlxsw_sp_span_entry), | |
50 | GFP_KERNEL); | |
51 | if (!mlxsw_sp->span.entries) | |
52 | return -ENOMEM; | |
53 | ||
54 | for (i = 0; i < mlxsw_sp->span.entries_count; i++) | |
55 | INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list); | |
56 | ||
57 | return 0; | |
58 | } | |
59 | ||
60 | void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) | |
61 | { | |
62 | int i; | |
63 | ||
64 | for (i = 0; i < mlxsw_sp->span.entries_count; i++) { | |
65 | struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; | |
66 | ||
67 | WARN_ON_ONCE(!list_empty(&curr->bound_ports_list)); | |
68 | } | |
69 | kfree(mlxsw_sp->span.entries); | |
70 | } | |
71 | ||
72 | static struct mlxsw_sp_span_entry * | |
73 | mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port) | |
74 | { | |
75 | struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; | |
76 | struct mlxsw_sp_span_entry *span_entry; | |
77 | char mpat_pl[MLXSW_REG_MPAT_LEN]; | |
78 | u8 local_port = port->local_port; | |
79 | int index; | |
80 | int i; | |
81 | int err; | |
82 | ||
83 | /* find a free entry to use */ | |
84 | index = -1; | |
85 | for (i = 0; i < mlxsw_sp->span.entries_count; i++) { | |
86 | if (!mlxsw_sp->span.entries[i].ref_count) { | |
87 | index = i; | |
88 | span_entry = &mlxsw_sp->span.entries[i]; | |
89 | break; | |
90 | } | |
91 | } | |
92 | if (index < 0) | |
93 | return NULL; | |
94 | ||
95 | /* create a new port analayzer entry for local_port */ | |
96 | mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true); | |
97 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); | |
98 | if (err) | |
99 | return NULL; | |
100 | ||
101 | span_entry->id = index; | |
102 | span_entry->ref_count = 1; | |
103 | span_entry->local_port = local_port; | |
104 | return span_entry; | |
105 | } | |
106 | ||
107 | static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, | |
108 | struct mlxsw_sp_span_entry *span_entry) | |
109 | { | |
110 | u8 local_port = span_entry->local_port; | |
111 | char mpat_pl[MLXSW_REG_MPAT_LEN]; | |
112 | int pa_id = span_entry->id; | |
113 | ||
114 | mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false); | |
115 | mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); | |
116 | } | |
117 | ||
118 | struct mlxsw_sp_span_entry * | |
119 | mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port) | |
120 | { | |
121 | int i; | |
122 | ||
123 | for (i = 0; i < mlxsw_sp->span.entries_count; i++) { | |
124 | struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; | |
125 | ||
126 | if (curr->ref_count && curr->local_port == local_port) | |
127 | return curr; | |
128 | } | |
129 | return NULL; | |
130 | } | |
131 | ||
132 | static struct mlxsw_sp_span_entry * | |
133 | mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port) | |
134 | { | |
135 | struct mlxsw_sp_span_entry *span_entry; | |
136 | ||
137 | span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp, | |
138 | port->local_port); | |
139 | if (span_entry) { | |
140 | /* Already exists, just take a reference */ | |
141 | span_entry->ref_count++; | |
142 | return span_entry; | |
143 | } | |
144 | ||
145 | return mlxsw_sp_span_entry_create(port); | |
146 | } | |
147 | ||
148 | static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, | |
149 | struct mlxsw_sp_span_entry *span_entry) | |
150 | { | |
151 | WARN_ON(!span_entry->ref_count); | |
152 | if (--span_entry->ref_count == 0) | |
153 | mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); | |
154 | return 0; | |
155 | } | |
156 | ||
157 | static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port) | |
158 | { | |
159 | struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; | |
160 | struct mlxsw_sp_span_inspected_port *p; | |
161 | int i; | |
162 | ||
163 | for (i = 0; i < mlxsw_sp->span.entries_count; i++) { | |
164 | struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; | |
165 | ||
166 | list_for_each_entry(p, &curr->bound_ports_list, list) | |
167 | if (p->local_port == port->local_port && | |
168 | p->type == MLXSW_SP_SPAN_EGRESS) | |
169 | return true; | |
170 | } | |
171 | ||
172 | return false; | |
173 | } | |
174 | ||
/* Convert an MTU to the shared-buffer size (in cells) needed for egress
 * mirroring: 2.5x the MTU converted to cells, plus one extra cell.
 * NOTE(review): the 5/2 factor appears to be headroom for mirrored
 * traffic bursts — confirm against the SBIB programming guide.
 */
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
180 | ||
181 | int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) | |
182 | { | |
183 | struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; | |
184 | char sbib_pl[MLXSW_REG_SBIB_LEN]; | |
185 | int err; | |
186 | ||
187 | /* If port is egress mirrored, the shared buffer size should be | |
188 | * updated according to the mtu value | |
189 | */ | |
190 | if (mlxsw_sp_span_is_egress_mirror(port)) { | |
191 | u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu); | |
192 | ||
193 | mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); | |
194 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); | |
195 | if (err) { | |
196 | netdev_err(port->dev, "Could not update shared buffer for mirroring\n"); | |
197 | return err; | |
198 | } | |
199 | } | |
200 | ||
201 | return 0; | |
202 | } | |
203 | ||
204 | static struct mlxsw_sp_span_inspected_port * | |
205 | mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port, | |
206 | struct mlxsw_sp_span_entry *span_entry) | |
207 | { | |
208 | struct mlxsw_sp_span_inspected_port *p; | |
209 | ||
210 | list_for_each_entry(p, &span_entry->bound_ports_list, list) | |
211 | if (port->local_port == p->local_port) | |
212 | return p; | |
213 | return NULL; | |
214 | } | |
215 | ||
216 | static int | |
217 | mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, | |
218 | struct mlxsw_sp_span_entry *span_entry, | |
219 | enum mlxsw_sp_span_type type, | |
220 | bool bind) | |
221 | { | |
222 | struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; | |
223 | char mpar_pl[MLXSW_REG_MPAR_LEN]; | |
224 | int pa_id = span_entry->id; | |
225 | ||
226 | /* bind the port to the SPAN entry */ | |
227 | mlxsw_reg_mpar_pack(mpar_pl, port->local_port, | |
228 | (enum mlxsw_reg_mpar_i_e)type, bind, pa_id); | |
229 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); | |
230 | } | |
231 | ||
/* Attach the port to a SPAN entry as an inspected (mirrored) port.
 * For egress SPAN an SBIB shared buffer sized for the port's MTU is
 * allocated first; the hardware binding (MPAR) is only programmed when
 * bind is true.  On success a tracking record is appended to the
 * entry's bound_ports_list.  On failure, completed steps are unwound in
 * reverse order.  Returns 0 or a negative errno.
 */
static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

	/* Unwind: undo the MPAR binding if it was programmed above... */
err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* ...then release the SBIB buffer by writing a zero size. */
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}
285 | ||
286 | static void | |
287 | mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port, | |
288 | struct mlxsw_sp_span_entry *span_entry, | |
289 | enum mlxsw_sp_span_type type, | |
290 | bool bind) | |
291 | { | |
292 | struct mlxsw_sp_span_inspected_port *inspected_port; | |
293 | struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; | |
294 | char sbib_pl[MLXSW_REG_SBIB_LEN]; | |
295 | ||
296 | inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry); | |
297 | if (!inspected_port) | |
298 | return; | |
299 | ||
300 | if (bind) | |
301 | mlxsw_sp_span_inspected_port_bind(port, span_entry, type, | |
302 | false); | |
303 | /* remove the SBIB buffer if it was egress SPAN */ | |
304 | if (type == MLXSW_SP_SPAN_EGRESS) { | |
305 | mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); | |
306 | mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); | |
307 | } | |
308 | ||
309 | mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); | |
310 | ||
311 | list_del(&inspected_port->list); | |
312 | kfree(inspected_port); | |
313 | } | |
314 | ||
315 | int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, | |
316 | struct mlxsw_sp_port *to, | |
317 | enum mlxsw_sp_span_type type, bool bind) | |
318 | { | |
319 | struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp; | |
320 | struct mlxsw_sp_span_entry *span_entry; | |
321 | int err; | |
322 | ||
323 | span_entry = mlxsw_sp_span_entry_get(to); | |
324 | if (!span_entry) | |
325 | return -ENOENT; | |
326 | ||
327 | netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", | |
328 | span_entry->id); | |
329 | ||
330 | err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind); | |
331 | if (err) | |
332 | goto err_port_bind; | |
333 | ||
334 | return 0; | |
335 | ||
336 | err_port_bind: | |
337 | mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); | |
338 | return err; | |
339 | } | |
340 | ||
341 | void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port, | |
342 | enum mlxsw_sp_span_type type, bool bind) | |
343 | { | |
344 | struct mlxsw_sp_span_entry *span_entry; | |
345 | ||
346 | span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp, | |
347 | destination_port); | |
348 | if (!span_entry) { | |
349 | netdev_err(from->dev, "no span entry found\n"); | |
350 | return; | |
351 | } | |
352 | ||
353 | netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n", | |
354 | span_entry->id); | |
355 | mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind); | |
356 | } |