]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /*- |
2 | * BSD LICENSE | |
3 | * | |
4 | * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. | |
5 | * All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * * Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * * Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * * Neither the name of Intel Corporation nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
25 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
27 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
32 | */ | |
33 | ||
34 | #include <stddef.h> | |
35 | #include <string.h> | |
36 | #include <stdbool.h> | |
37 | ||
38 | #include <rte_alarm.h> | |
39 | #include <rte_malloc.h> | |
40 | #include <rte_errno.h> | |
41 | #include <rte_cycles.h> | |
42 | #include <rte_compat.h> | |
43 | ||
44 | #include "rte_eth_bond_private.h" | |
45 | ||
46 | static void bond_mode_8023ad_ext_periodic_cb(void *arg); | |
47 | ||
48 | #ifdef RTE_LIBRTE_BOND_DEBUG_8023AD | |
49 | #define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \ | |
50 | bond_dbg_get_time_diff_ms(), slave_id, \ | |
51 | __func__, ##__VA_ARGS__) | |
52 | ||
53 | static uint64_t start_time; | |
54 | ||
/* Milliseconds elapsed since the first call (debug timestamping only).
 * The first invocation latches the current TSC value as the reference
 * point, so that call returns 0. Not thread-safe: `start_time` is a
 * plain static — acceptable for debug logging. */
static unsigned
bond_dbg_get_time_diff_ms(void)
{
	uint64_t now;

	now = rte_rdtsc();
	if (start_time == 0)
		start_time = now;

	/* Convert TSC delta to milliseconds. */
	return ((now - start_time) * 1000) / rte_get_tsc_hz();
}
66 | ||
/* Debug helper: dump every field of a LACPDU to the PMD debug log.
 * Formats the actor/partner system MACs as colon-separated hex and the
 * actor/partner state bytes as space-separated flag labels. */
static void
bond_print_lacp(struct lacpdu *l)
{
	char a_address[18];	/* "XX:XX:XX:XX:XX:XX" + NUL */
	char p_address[18];
	char a_state[256] = { 0 };
	char p_state[256] = { 0 };

	/* Labels for state bits 0..7, in bit order (802.1AX state field). */
	static const char * const state_labels[] = {
		"ACT", "TIMEOUT", "AGG", "SYNC", "COL", "DIST", "DEF", "EXP"
	};

	int a_len = 0;
	int p_len = 0;
	uint8_t i;
	uint8_t *addr;

	addr = l->actor.port_params.system.addr_bytes;
	snprintf(a_address, sizeof(a_address), "%02X:%02X:%02X:%02X:%02X:%02X",
		addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	addr = l->partner.port_params.system.addr_bytes;
	snprintf(p_address, sizeof(p_address), "%02X:%02X:%02X:%02X:%02X:%02X",
		addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/* Build "LABEL LABEL ..." strings for each set state bit. */
	for (i = 0; i < 8; i++) {
		if ((l->actor.state >> i) & 1) {
			a_len += snprintf(&a_state[a_len], RTE_DIM(a_state) - a_len, "%s ",
				state_labels[i]);
		}

		if ((l->partner.state >> i) & 1) {
			p_len += snprintf(&p_state[p_len], RTE_DIM(p_state) - p_len, "%s ",
				state_labels[i]);
		}
	}

	/* Strip the trailing space left by the loop above. */
	if (a_len && a_state[a_len-1] == ' ')
		a_state[a_len-1] = '\0';

	if (p_len && p_state[p_len-1] == ' ')
		p_state[p_len-1] = '\0';

	RTE_LOG(DEBUG, PMD, "LACP: {\n"\
		"  subtype= %02X\n"\
		"  ver_num=%02X\n"\
		"  actor={ tlv=%02X, len=%02X\n"\
		"    pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
		"       state={ %s }\n"\
		"  }\n"\
		"  partner={ tlv=%02X, len=%02X\n"\
		"    pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
		"       state={ %s }\n"\
		"  }\n"\
		"  collector={info=%02X, length=%02X, max_delay=%04X\n, " \
		"type_term=%02X, terminator_length = %02X}\n",\
		l->subtype,\
		l->version_number,\
		l->actor.tlv_type_info,\
		l->actor.info_length,\
		l->actor.port_params.system_priority,\
		a_address,\
		l->actor.port_params.key,\
		l->actor.port_params.port_priority,\
		l->actor.port_params.port_number,\
		a_state,\
		l->partner.tlv_type_info,\
		l->partner.info_length,\
		l->partner.port_params.system_priority,\
		p_address,\
		l->partner.port_params.key,\
		l->partner.port_params.port_priority,\
		l->partner.port_params.port_number,\
		p_state,\
		l->tlv_type_collector_info,\
		l->collector_info_length,\
		l->collector_max_delay,\
		l->tlv_type_terminator,\
		l->terminator_length);

}
148 | #define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu) | |
149 | #else | |
150 | #define BOND_PRINT_LACP(lacpdu) do { } while (0) | |
151 | #define MODE4_DEBUG(fmt, ...) do { } while (0) | |
152 | #endif | |
153 | ||
154 | static const struct ether_addr lacp_mac_addr = { | |
155 | .addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 } | |
156 | }; | |
157 | ||
158 | struct port mode_8023ad_ports[RTE_MAX_ETHPORTS]; | |
159 | ||
/* Stop a timer. A value of zero marks the timer as not running
 * (see timer_is_stopped()). */
static void
timer_cancel(uint64_t *timer)
{
	*timer = 0;
}
165 | ||
/* Arm a timer to expire `timeout` TSC cycles from now. */
static void
timer_set(uint64_t *timer, uint64_t timeout)
{
	uint64_t now = rte_rdtsc();

	*timer = now + timeout;
}
171 | ||
/* Forces given timer to be in expired state: setting the deadline to
 * the current TSC value makes timer_is_expired() report true on the
 * next check (the TSC will have advanced by then). */
static void
timer_force_expired(uint64_t *timer)
{
	const uint64_t now = rte_rdtsc();

	*timer = now;
}
178 | ||
/* A timer holding zero is stopped (the value written by timer_cancel()). */
static bool
timer_is_stopped(uint64_t *timer)
{
	return (*timer == 0);
}
184 | ||
/* True once the TSC has passed the timer's deadline. NOTE: a stopped
 * timer (value 0) also reads as expired here; callers that need to
 * distinguish the two use timer_is_running() instead. */
static bool
timer_is_expired(uint64_t *timer)
{
	return rte_rdtsc() > *timer;
}
190 | ||
/* Timer is in running state if it is neither stopped nor expired. */
static bool
timer_is_running(uint64_t *timer)
{
	if (timer_is_stopped(timer))
		return false;

	return !timer_is_expired(timer);
}
197 | ||
198 | static void | |
199 | set_warning_flags(struct port *port, uint16_t flags) | |
200 | { | |
201 | int retval; | |
202 | uint16_t old; | |
203 | uint16_t new_flag = 0; | |
204 | ||
205 | do { | |
206 | old = port->warnings_to_show; | |
207 | new_flag = old | flags; | |
208 | retval = rte_atomic16_cmpset(&port->warnings_to_show, old, new_flag); | |
209 | } while (unlikely(retval == 0)); | |
210 | } | |
211 | ||
212 | static void | |
213 | show_warnings(uint8_t slave_id) | |
214 | { | |
215 | struct port *port = &mode_8023ad_ports[slave_id]; | |
216 | uint8_t warnings; | |
217 | ||
218 | do { | |
219 | warnings = port->warnings_to_show; | |
220 | } while (rte_atomic16_cmpset(&port->warnings_to_show, warnings, 0) == 0); | |
221 | ||
222 | if (!warnings) | |
223 | return; | |
224 | ||
225 | if (!timer_is_expired(&port->warning_timer)) | |
226 | return; | |
227 | ||
228 | ||
229 | timer_set(&port->warning_timer, BOND_8023AD_WARNINGS_PERIOD_MS * | |
230 | rte_get_tsc_hz() / 1000); | |
231 | ||
232 | if (warnings & WRN_RX_QUEUE_FULL) { | |
233 | RTE_LOG(DEBUG, PMD, | |
234 | "Slave %u: failed to enqueue LACP packet into RX ring.\n" | |
235 | "Receive and transmit functions must be invoked on bonded\n" | |
236 | "interface at least 10 times per second or LACP will not\n" | |
237 | "work correctly\n", slave_id); | |
238 | } | |
239 | ||
240 | if (warnings & WRN_TX_QUEUE_FULL) { | |
241 | RTE_LOG(DEBUG, PMD, | |
242 | "Slave %u: failed to enqueue LACP packet into TX ring.\n" | |
243 | "Receive and transmit functions must be invoked on bonded\n" | |
244 | "interface at least 10 times per second or LACP will not\n" | |
245 | "work correctly\n", slave_id); | |
246 | } | |
247 | ||
248 | if (warnings & WRN_RX_MARKER_TO_FAST) | |
249 | RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id); | |
250 | ||
251 | if (warnings & WRN_UNKNOWN_SLOW_TYPE) { | |
252 | RTE_LOG(INFO, PMD, | |
253 | "Slave %u: ignoring unknown slow protocol frame type", slave_id); | |
254 | } | |
255 | ||
256 | if (warnings & WRN_UNKNOWN_MARKER_TYPE) | |
257 | RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id); | |
258 | ||
259 | if (warnings & WRN_NOT_LACP_CAPABLE) | |
260 | MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id); | |
261 | } | |
262 | ||
static void
record_default(struct port *port)
{
	/* Record default parameters for the partner. Partner admin parameters
	 * are not implemented, so set them to an arbitrary default (last
	 * known) and mark in the actor state that the partner information is
	 * DEFAULTED. */
	port->partner_state = STATE_LACP_ACTIVE;
	ACTOR_STATE_SET(port, DEFAULTED);
}
272 | ||
/** Function handles rx state machine.
 *
 * This function implements Receive State Machine from point 5.4.12 in
 * 802.1AX documentation. It should be called periodically.
 *
 * @param internals Bonding device private data.
 * @param slave_id  Slave port the (possible) LACPDU was received on.
 * @param lacp      LACPDU received, or NULL if none arrived this cycle.
 */
static void
rx_machine(struct bond_dev_private *internals, uint8_t slave_id,
		struct lacpdu *lacp)
{
	struct port *agg, *port = &mode_8023ad_ports[slave_id];
	uint64_t timeout;

	if (SM_FLAG(port, BEGIN)) {
		/* INITIALIZE state: reset selection, partner info and timers. */
		MODE4_DEBUG("-> INITIALIZE\n");
		SM_FLAG_CLR(port, MOVED);
		port->selected = UNSELECTED;

		record_default(port);

		ACTOR_STATE_CLR(port, EXPIRED);
		timer_cancel(&port->current_while_timer);

		/* DISABLED: On initialization partner is out of sync */
		PARTNER_STATE_CLR(port, SYNCHRONIZATION);

		/* LACP DISABLED stuff if LACP not enabled on this port */
		if (!SM_FLAG(port, LACP_ENABLED))
			PARTNER_STATE_CLR(port, AGGREGATION);
		else
			PARTNER_STATE_SET(port, AGGREGATION);
	}

	if (!SM_FLAG(port, LACP_ENABLED)) {
		/* Update parameters only if state changed: a non-stopped
		 * current_while timer means LACP was enabled previously. */
		if (!timer_is_stopped(&port->current_while_timer)) {
			port->selected = UNSELECTED;
			record_default(port);
			PARTNER_STATE_CLR(port, AGGREGATION);
			ACTOR_STATE_CLR(port, EXPIRED);
			timer_cancel(&port->current_while_timer);
		}
		return;
	}

	if (lacp) {
		MODE4_DEBUG("LACP -> CURRENT\n");
		BOND_PRINT_LACP(lacp);
		/* Update selected flag. If partner parameters are defaulted
		 * assume they match. If not defaulted, compare the PDU's actor
		 * info with this port's recorded partner params. */
		if (!ACTOR_STATE(port, DEFAULTED) &&
			(ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
			|| memcmp(&port->partner, &lacp->actor.port_params,
				sizeof(port->partner)) != 0)) {
			MODE4_DEBUG("selected <- UNSELECTED\n");
			port->selected = UNSELECTED;
		}

		/* Record this PDU's actor params as our partner params */
		memcpy(&port->partner, &lacp->actor.port_params,
			sizeof(struct port_params));
		port->partner_state = lacp->actor.state;

		/* Partner parameters are not defaulted any more */
		ACTOR_STATE_CLR(port, DEFAULTED);

		/* Check whether the partner's view of us (the PDU's partner
		 * section) matches this port's actor parameters. The system
		 * address is taken from the aggregator's actor. */
		agg = &mode_8023ad_ports[port->aggregator_port_id];
		bool match = port->actor.system_priority ==
			lacp->partner.port_params.system_priority &&
			is_same_ether_addr(&agg->actor.system,
			&lacp->partner.port_params.system) &&
			port->actor.port_priority ==
			lacp->partner.port_params.port_priority &&
			port->actor.port_number ==
			lacp->partner.port_params.port_number;

		/* Update NTT if partner's information about us is outdated
		 * (xored and masked bits are set). */
		uint8_t state_mask = STATE_LACP_ACTIVE | STATE_LACP_SHORT_TIMEOUT |
			STATE_SYNCHRONIZATION | STATE_AGGREGATION;

		if (((port->actor_state ^ lacp->partner.state) & state_mask) ||
				match == false) {
			SM_FLAG_SET(port, NTT);
		}

		/* Declare the partner in sync when its view matches us, or
		 * when only we aggregate while the partner does not. */
		if (match == true && ACTOR_STATE(port, AGGREGATION) ==
				PARTNER_STATE(port, AGGREGATION))
			PARTNER_STATE_SET(port, SYNCHRONIZATION);
		else if (!PARTNER_STATE(port, AGGREGATION) && ACTOR_STATE(port,
				AGGREGATION))
			PARTNER_STATE_SET(port, SYNCHRONIZATION);
		else
			PARTNER_STATE_CLR(port, SYNCHRONIZATION);

		if (ACTOR_STATE(port, LACP_SHORT_TIMEOUT))
			timeout = internals->mode4.short_timeout;
		else
			timeout = internals->mode4.long_timeout;

		/* Restart the current_while timer: partner info is fresh. */
		timer_set(&port->current_while_timer, timeout);
		ACTOR_STATE_CLR(port, EXPIRED);
		return; /* No state change */
	}

	/* If CURRENT state timer is not running (stopped or expired)
	 * transit to EXPIRED state from DISABLED or CURRENT */
	if (!timer_is_running(&port->current_while_timer)) {
		ACTOR_STATE_SET(port, EXPIRED);
		PARTNER_STATE_CLR(port, SYNCHRONIZATION);
		PARTNER_STATE_SET(port, LACP_SHORT_TIMEOUT);
		timer_set(&port->current_while_timer, internals->mode4.short_timeout);
	}
}
393 | ||
/**
 * Function handles periodic tx state machine.
 *
 * Function implements Periodic Transmission state machine from point 5.4.13
 * in 802.1AX documentation. It should be called periodically.
 *
 * @param internals Bonding device private data.
 * @param slave_id  Slave port to handle state machine for.
 */
static void
periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
{
	struct port *port = &mode_8023ad_ports[slave_id];
	/* Calculate if either side is LACP enabled */
	uint64_t timeout;
	uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
		PARTNER_STATE(port, LACP_ACTIVE);

	uint8_t is_partner_fast, was_partner_fast;
	/* No periodic on BEGIN, LACP DISABLED, or when both sides are passive */
	if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
		timer_cancel(&port->periodic_timer);
		/* Expire the tx timer so tx_machine may send as soon as NTT
		 * is raised again. */
		timer_force_expired(&port->tx_machine_timer);
		SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);

		MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
			SM_FLAG(port, BEGIN) ? "begind " : "",
			SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
			active ? "LACP active " : "LACP pasive ");
		return;
	}

	is_partner_fast = PARTNER_STATE(port, LACP_SHORT_TIMEOUT);
	was_partner_fast = SM_FLAG(port, PARTNER_SHORT_TIMEOUT);

	/* If periodic timer is not started, transit from NO PERIODIC to FAST/SLOW.
	 * Other case: check if timer expired or partner's settings changed. */
	if (!timer_is_stopped(&port->periodic_timer)) {
		if (timer_is_expired(&port->periodic_timer)) {
			SM_FLAG_SET(port, NTT);
		} else if (is_partner_fast != was_partner_fast) {
			/* Partner's timeout was slow and now it is fast -> send LACP.
			 * In the other case (was fast and now it is slow) just switch
			 * the timeout to slow without forcing a LACP send (because
			 * the standard says so). */
			if (!is_partner_fast)
				SM_FLAG_SET(port, NTT);
		} else
			return; /* Nothing changed */
	}

	/* Handle state transition to FAST/SLOW LACP timeout */
	if (is_partner_fast) {
		timeout = internals->mode4.fast_periodic_timeout;
		SM_FLAG_SET(port, PARTNER_SHORT_TIMEOUT);
	} else {
		timeout = internals->mode4.slow_periodic_timeout;
		SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
	}

	timer_set(&port->periodic_timer, timeout);
}
455 | ||
/**
 * Function handles mux state machine.
 *
 * Function implements Mux Machine from point 5.4.15 in 802.1AX documentation.
 * It should be called periodically.
 *
 * States progress DETACHED -> WAITING -> ATTACHED -> COLLECTING ->
 * DISTRIBUTING, tracked via the SYNC/COL/DIST bits of actor_state.
 *
 * @param internals Bonding device private data.
 * @param slave_id  Slave port to handle state machine for.
 */
static void
mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
{
	struct port *port = &mode_8023ad_ports[slave_id];

	/* Bits that define the mux position within actor_state. */
	const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
		STATE_COLLECTING;

	/* Enter DETACHED state on BEGIN condition or from any other state if
	 * port was unselected */
	if (SM_FLAG(port, BEGIN) ||
			port->selected == UNSELECTED || (port->selected == STANDBY &&
				(port->actor_state & state_mask) != 0)) {
		/* detach mux from aggregator */
		port->actor_state &= ~state_mask;
		/* Set NTT if BEGIN, or on a transition from any other state
		 * (indicated by the wait_while_timer having been started). */
		if (SM_FLAG(port, BEGIN) ||
				!timer_is_stopped(&port->wait_while_timer)) {
			SM_FLAG_SET(port, NTT);
			MODE4_DEBUG("-> DETACHED\n");
		}
		timer_cancel(&port->wait_while_timer);
	}

	if (timer_is_stopped(&port->wait_while_timer)) {
		if (port->selected == SELECTED || port->selected == STANDBY) {
			timer_set(&port->wait_while_timer,
				internals->mode4.aggregate_wait_timeout);

			MODE4_DEBUG("DETACHED -> WAITING\n");
		}
		/* Waiting state entered */
		return;
	}

	/* Transit to next state only once the wait period has elapsed. */
	if (!timer_is_expired(&port->wait_while_timer))
		return;

	if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
		!PARTNER_STATE(port, SYNCHRONIZATION)) {
		/* If in COLLECTING or DISTRIBUTING state and partner becomes
		 * out of sync, transit to ATTACHED state. */
		ACTOR_STATE_CLR(port, DISTRIBUTING);
		ACTOR_STATE_CLR(port, COLLECTING);
		/* Clear actor sync to activate the ATTACHED transition in the
		 * condition below. */
		ACTOR_STATE_CLR(port, SYNCHRONIZATION);
		MODE4_DEBUG("Out of sync -> ATTACHED\n");
	}

	if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
		/* attach mux to aggregator */
		RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
			STATE_DISTRIBUTING)) == 0);

		ACTOR_STATE_SET(port, SYNCHRONIZATION);
		SM_FLAG_SET(port, NTT);
		MODE4_DEBUG("ATTACHED Entered\n");
	} else if (!ACTOR_STATE(port, COLLECTING)) {
		/* Start collecting if in sync */
		if (PARTNER_STATE(port, SYNCHRONIZATION)) {
			MODE4_DEBUG("ATTACHED -> COLLECTING\n");
			ACTOR_STATE_SET(port, COLLECTING);
			SM_FLAG_SET(port, NTT);
		}
	} else if (ACTOR_STATE(port, COLLECTING)) {
		/* Check if partner is in COLLECTING state. If so this port can
		 * distribute frames to it */
		if (!ACTOR_STATE(port, DISTRIBUTING)) {
			if (PARTNER_STATE(port, COLLECTING)) {
				/* Enable DISTRIBUTING if partner is collecting */
				ACTOR_STATE_SET(port, DISTRIBUTING);
				SM_FLAG_SET(port, NTT);
				MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
				RTE_LOG(INFO, PMD,
					"Bond %u: slave id %u distributing started.\n",
					internals->port_id, slave_id);
			}
		} else {
			if (!PARTNER_STATE(port, COLLECTING)) {
				/* Disable DISTRIBUTING (enter COLLECTING state) if
				 * partner is not collecting */
				ACTOR_STATE_CLR(port, DISTRIBUTING);
				SM_FLAG_SET(port, NTT);
				MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
				RTE_LOG(INFO, PMD,
					"Bond %u: slave id %u distributing stopped.\n",
					internals->port_id, slave_id);
			}
		}
	}
}
558 | ||
/**
 * Function handles transmit state machine.
 *
 * Function implements Transmit Machine from point 5.4.16 in 802.1AX
 * documentation. Builds a LACPDU from the port's current actor/partner
 * parameters and enqueues it on the port's TX ring when NTT (Need To
 * Transmit) is set and the rate-limit timer allows it.
 *
 * @param internals Bonding device private data.
 * @param slave_id  Slave port to transmit on.
 */
static void
tx_machine(struct bond_dev_private *internals, uint8_t slave_id)
{
	struct port *agg, *port = &mode_8023ad_ports[slave_id];

	struct rte_mbuf *lacp_pkt = NULL;
	struct lacpdu_header *hdr;
	struct lacpdu *lacpdu;

	/* If periodic timer is not running, periodic machine is in NO PERIODIC
	 * and according to the 802.3ax standard the tx machine should not
	 * transmit any frames and should set ntt to false. */
	if (timer_is_stopped(&port->periodic_timer))
		SM_FLAG_CLR(port, NTT);

	if (!SM_FLAG(port, NTT))
		return;

	/* Rate limit: at most one LACPDU per tx_period_timeout. */
	if (!timer_is_expired(&port->tx_machine_timer))
		return;

	lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
	if (lacp_pkt == NULL) {
		RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n");
		return;
	}

	lacp_pkt->data_len = sizeof(*hdr);
	lacp_pkt->pkt_len = sizeof(*hdr);

	hdr = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);

	/* Source and destination MAC: destination is the well-known slow
	 * protocols multicast address. */
	ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
	rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
	hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);

	lacpdu = &hdr->lacpdu;
	memset(lacpdu, 0, sizeof(*lacpdu));

	/* Initialize LACP part */
	lacpdu->subtype = SLOW_SUBTYPE_LACP;
	lacpdu->version_number = 1;

	/* ACTOR: this port's parameters, with the system address taken from
	 * the aggregator it is attached to. */
	lacpdu->actor.tlv_type_info = TLV_TYPE_ACTOR_INFORMATION;
	lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
	memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
			sizeof(port->actor));
	agg = &mode_8023ad_ports[port->aggregator_port_id];
	ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
	lacpdu->actor.state = port->actor_state;

	/* PARTNER: echo back what we last recorded about the partner. */
	lacpdu->partner.tlv_type_info = TLV_TYPE_PARTNER_INFORMATION;
	lacpdu->partner.info_length = sizeof(struct lacpdu_actor_partner_params);
	memcpy(&lacpdu->partner.port_params, &port->partner,
			sizeof(struct port_params));
	lacpdu->partner.state = port->partner_state;

	/* Other fields */
	lacpdu->tlv_type_collector_info = TLV_TYPE_COLLECTOR_INFORMATION;
	lacpdu->collector_info_length = 0x10;
	lacpdu->collector_max_delay = 0;

	lacpdu->tlv_type_terminator = TLV_TYPE_TERMINATOR_INFORMATION;
	lacpdu->terminator_length = 0;

	if (rte_ring_enqueue(port->tx_ring, lacp_pkt) == -ENOBUFS) {
		/* If TX ring full, drop packet and free message. NTT stays
		 * set, so retransmission will happen on the next call. */
		rte_pktmbuf_free(lacp_pkt);
		set_warning_flags(port, WRN_TX_QUEUE_FULL);
		return;
	}

	MODE4_DEBUG("sending LACP frame\n");
	BOND_PRINT_LACP(lacpdu);

	timer_set(&port->tx_machine_timer, internals->mode4.tx_period_timeout);
	SM_FLAG_CLR(port, NTT);
}
649 | ||
/**
 * Function assigns port to aggregator.
 *
 * Scans the active slaves for an aggregator whose actor key and recorded
 * partner parameters match this port's; when none is suitable the port
 * aggregates with itself. Always leaves the port in SELECTED state.
 *
 * @param internals Bonding device private data.
 * @param slave_id  Slave port to assign an aggregator to.
 */
static void
selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
{
	struct port *agg, *port;
	uint8_t slaves_count, new_agg_id, i;
	uint8_t *slaves;

	slaves = internals->active_slaves;
	slaves_count = internals->active_slave_count;
	port = &mode_8023ad_ports[slave_id];

	/* Search for aggregator suitable for this port */
	for (i = 0; i < slaves_count; ++i) {
		agg = &mode_8023ad_ports[slaves[i]];
		/* Skip ports that are not aggregators (an aggregator is a
		 * port whose aggregator id is itself). */
		if (agg->aggregator_port_id != slaves[i])
			continue;

		/* The actor's system ID is not checked since all slave devices
		 * have the same ID (MAC address). A match requires the same
		 * actor key and identical partner parameters, a non-zero
		 * partner system address, and a full-duplex-capable key. */
		if ((agg->actor.key == port->actor.key &&
			agg->partner.system_priority == port->partner.system_priority &&
			is_same_ether_addr(&agg->partner.system, &port->partner.system) == 1
			&& (agg->partner.key == port->partner.key)) &&
			is_zero_ether_addr(&port->partner.system) != 1 &&
			(agg->actor.key &
				rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) != 0) {

			break;
		}
	}

	/* By default, the port uses itself as aggregator */
	if (i == slaves_count)
		new_agg_id = slave_id;
	else
		new_agg_id = slaves[i];

	if (new_agg_id != port->aggregator_port_id) {
		port->aggregator_port_id = new_agg_id;

		MODE4_DEBUG("-> SELECTED: ID=%3u\n"
			"\t%s aggregator ID=%3u\n",
			port->aggregator_port_id,
			port->aggregator_port_id == slave_id ?
				"aggregator not found, using default" : "aggregator found",
			port->aggregator_port_id);
	}

	port->selected = SELECTED;
}
707 | ||
708 | /* Function maps DPDK speed to bonding speed stored in key field */ | |
709 | static uint16_t | |
710 | link_speed_key(uint16_t speed) { | |
711 | uint16_t key_speed; | |
712 | ||
713 | switch (speed) { | |
714 | case ETH_SPEED_NUM_NONE: | |
715 | key_speed = 0x00; | |
716 | break; | |
717 | case ETH_SPEED_NUM_10M: | |
718 | key_speed = BOND_LINK_SPEED_KEY_10M; | |
719 | break; | |
720 | case ETH_SPEED_NUM_100M: | |
721 | key_speed = BOND_LINK_SPEED_KEY_100M; | |
722 | break; | |
723 | case ETH_SPEED_NUM_1G: | |
724 | key_speed = BOND_LINK_SPEED_KEY_1000M; | |
725 | break; | |
726 | case ETH_SPEED_NUM_10G: | |
727 | key_speed = BOND_LINK_SPEED_KEY_10G; | |
728 | break; | |
729 | case ETH_SPEED_NUM_20G: | |
730 | key_speed = BOND_LINK_SPEED_KEY_20G; | |
731 | break; | |
732 | case ETH_SPEED_NUM_40G: | |
733 | key_speed = BOND_LINK_SPEED_KEY_40G; | |
734 | break; | |
735 | default: | |
736 | /* Unknown speed*/ | |
737 | key_speed = 0xFFFF; | |
738 | } | |
739 | ||
740 | return key_speed; | |
741 | } | |
742 | ||
/* Periodic EAL alarm callback driving all mode-4 state machines.
 *
 * Pass 1 refreshes each active slave's actor key (from link speed/duplex)
 * and system address. Pass 2 runs the rx, periodic, mux, tx and selection
 * machines per slave, then re-arms the alarm.
 *
 * @param arg The bonded rte_eth_dev (opaque alarm argument).
 */
static void
bond_mode_8023ad_periodic_cb(void *arg)
{
	struct rte_eth_dev *bond_dev = arg;
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct port *port;
	struct rte_eth_link link_info;
	struct ether_addr slave_addr;

	void *pkt = NULL;
	uint8_t i, slave_id;


	/* Update link status on each port */
	for (i = 0; i < internals->active_slave_count; i++) {
		uint16_t key;

		slave_id = internals->active_slaves[i];
		rte_eth_link_get(slave_id, &link_info);
		rte_eth_macaddr_get(slave_id, &slave_addr);

		if (link_info.link_status != 0) {
			/* Speed bits occupy the key above the duplex bit. */
			key = link_speed_key(link_info.link_speed) << 1;
			if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
				key |= BOND_LINK_FULL_DUPLEX_KEY;
		} else
			key = 0;

		port = &mode_8023ad_ports[slave_id];

		key = rte_cpu_to_be_16(key);
		if (key != port->actor.key) {
			if (!(key & rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)))
				set_warning_flags(port, WRN_NOT_LACP_CAPABLE);

			port->actor.key = key;
			SM_FLAG_SET(port, NTT);
		}

		if (!is_same_ether_addr(&port->actor.system, &slave_addr)) {
			ether_addr_copy(&slave_addr, &port->actor.system);
			if (port->aggregator_port_id == slave_id)
				SM_FLAG_SET(port, NTT);
		}
	}

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		port = &mode_8023ad_ports[slave_id];

		if ((port->actor.key &
				rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {

			SM_FLAG_SET(port, BEGIN);

			/* LACP is disabled on half duplex or when link is down */
			if (SM_FLAG(port, LACP_ENABLED)) {
				/* If port was enabled set it to BEGIN state */
				SM_FLAG_CLR(port, LACP_ENABLED);
				ACTOR_STATE_CLR(port, DISTRIBUTING);
				ACTOR_STATE_CLR(port, COLLECTING);
			}

			/* Skip this port processing */
			continue;
		}

		SM_FLAG_SET(port, LACP_ENABLED);

		/* Find LACP packet for this port. The subtype is not checked
		 * here; it was verified by the function that queued the packet. */
		if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
			struct rte_mbuf *lacp_pkt = pkt;
			struct lacpdu_header *lacp;

			lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
			RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);

			/* This is a LACP frame so pass it to rx_machine */
			rx_machine(internals, slave_id, &lacp->lacpdu);
			rte_pktmbuf_free(lacp_pkt);
		} else
			rx_machine(internals, slave_id, NULL);

		periodic_machine(internals, slave_id);
		mux_machine(internals, slave_id);
		tx_machine(internals, slave_id);
		selection_logic(internals, slave_id);

		SM_FLAG_CLR(port, BEGIN);
		show_warnings(slave_id);
	}

	/* Re-arm: this callback reschedules itself forever. */
	rte_eal_alarm_set(internals->mode4.update_timeout_us,
			bond_mode_8023ad_periodic_cb, arg);
}
839 | ||
/*
 * Initialize 802.3ad (mode 4) state for a slave that is being activated.
 *
 * Resets the actor/partner parameters to defaults, enables promiscuous mode
 * on the slave (LACP uses a multicast destination MAC) and, on the first
 * activation only, creates the per-slave mempool and rx/tx rings used by
 * the mode 4 state machines. Allocation failure is fatal (rte_panic),
 * because the already-created resources cannot be reclaimed.
 */
void
bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
{
	struct bond_dev_private *internals = bond_dev->data->dev_private;

	struct port *port = &mode_8023ad_ports[slave_id];
	struct port_params initial = {
		.system = { { 0 } },
		.system_priority = rte_cpu_to_be_16(0xFFFF),
		.key = rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY),
		.port_priority = rte_cpu_to_be_16(0x00FF),
		.port_number = 0,
	};

	char mem_name[RTE_ETH_NAME_MAX_LEN];
	int socket_id;
	unsigned element_size;
	uint32_t total_tx_desc;
	struct bond_tx_queue *bd_tx_q;
	uint16_t q_id;

	/* Given slave must not be in active list. */
	RTE_ASSERT(find_slave_by_id(internals->active_slaves,
	internals->active_slave_count, slave_id) == internals->active_slave_count);
	RTE_SET_USED(internals); /* used only for assert when enabled */

	memcpy(&port->actor, &initial, sizeof(struct port_params));
	/* Standard requires that port ID must be greater than 0.
	 * Add 1 to get the corresponding port_number. */
	port->actor.port_number = rte_cpu_to_be_16((uint16_t)slave_id + 1);

	memcpy(&port->partner, &initial, sizeof(struct port_params));

	/* default states */
	port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED;
	port->partner_state = STATE_LACP_ACTIVE;
	port->sm_flags = SM_FLAGS_BEGIN;

	/* use this port as aggregator */
	port->aggregator_port_id = slave_id;
	rte_eth_promiscuous_enable(slave_id);

	timer_cancel(&port->warning_timer);

	/* Resources below are created once and kept across re-activations. */
	if (port->mbuf_pool != NULL)
		return;

	RTE_ASSERT(port->rx_ring == NULL);
	RTE_ASSERT(port->tx_ring == NULL);
	socket_id = rte_eth_devices[slave_id].data->numa_node;

	/* Each element must hold a full slow-protocol frame plus mbuf
	 * metadata and headroom. */
	element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf)
				+ RTE_PKTMBUF_HEADROOM;

	/* The size of the mempool should be at least:
	 * the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
	total_tx_desc = BOND_MODE_8023AX_SLAVE_TX_PKTS;
	for (q_id = 0; q_id < bond_dev->data->nb_tx_queues; q_id++) {
		bd_tx_q = (struct bond_tx_queue*)bond_dev->data->tx_queues[q_id];
		total_tx_desc += bd_tx_q->nb_tx_desc;
	}

	snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
	port->mbuf_pool = rte_mempool_create(mem_name,
		total_tx_desc, element_size,
		RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
		sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
		NULL, rte_pktmbuf_init, NULL, socket_id, MEMPOOL_F_NO_SPREAD);

	/* Any memory allocation failure in initialization is critical because
	 * resources can't be freed, so reinitialization is impossible. */
	if (port->mbuf_pool == NULL) {
		rte_panic("Slave %u: Failed to create memory pool '%s': %s\n",
			slave_id, mem_name, rte_strerror(rte_errno));
	}

	snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id);
	port->rx_ring = rte_ring_create(mem_name,
			rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0);

	if (port->rx_ring == NULL) {
		rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id,
			mem_name, rte_strerror(rte_errno));
	}

	/* TX ring is at least one pkt longer to make room for marker packet. */
	snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id);
	port->tx_ring = rte_ring_create(mem_name,
			rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0);

	if (port->tx_ring == NULL) {
		rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id,
			mem_name, rte_strerror(rte_errno));
	}
}
935 | ||
936 | int | |
937 | bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev, | |
938 | uint8_t slave_id) | |
939 | { | |
940 | struct bond_dev_private *internals = bond_dev->data->dev_private; | |
941 | void *pkt = NULL; | |
942 | struct port *port; | |
943 | uint8_t i; | |
944 | ||
945 | /* Given slave must be in active list */ | |
946 | RTE_ASSERT(find_slave_by_id(internals->active_slaves, | |
947 | internals->active_slave_count, slave_id) < internals->active_slave_count); | |
948 | ||
949 | /* Exclude slave from transmit policy. If this slave is an aggregator | |
950 | * make all aggregated slaves unselected to force selection logic | |
951 | * to select suitable aggregator for this port. */ | |
952 | for (i = 0; i < internals->active_slave_count; i++) { | |
953 | port = &mode_8023ad_ports[internals->active_slaves[i]]; | |
954 | if (port->aggregator_port_id != slave_id) | |
955 | continue; | |
956 | ||
957 | port->selected = UNSELECTED; | |
958 | ||
959 | /* Use default aggregator */ | |
960 | port->aggregator_port_id = internals->active_slaves[i]; | |
961 | } | |
962 | ||
963 | port = &mode_8023ad_ports[slave_id]; | |
964 | port->selected = UNSELECTED; | |
965 | port->actor_state &= ~(STATE_SYNCHRONIZATION | STATE_DISTRIBUTING | | |
966 | STATE_COLLECTING); | |
967 | ||
968 | while (rte_ring_dequeue(port->rx_ring, &pkt) == 0) | |
969 | rte_pktmbuf_free((struct rte_mbuf *)pkt); | |
970 | ||
971 | while (rte_ring_dequeue(port->tx_ring, &pkt) == 0) | |
972 | rte_pktmbuf_free((struct rte_mbuf *)pkt); | |
973 | return 0; | |
974 | } | |
975 | ||
/*
 * Propagate slave MAC address changes into the 802.3ad actor parameters.
 * The periodic state machine is stopped while actor.system addresses are
 * rewritten and restarted afterwards if the bonded device was running.
 */
void
bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
{
	struct bond_dev_private *internals = bond_dev->data->dev_private;
	struct ether_addr slave_addr;
	struct port *slave, *agg_slave;
	uint8_t slave_id, i, j;

	bond_mode_8023ad_stop(bond_dev);

	for (i = 0; i < internals->active_slave_count; i++) {
		slave_id = internals->active_slaves[i];
		slave = &mode_8023ad_ports[slave_id];
		rte_eth_macaddr_get(slave_id, &slave_addr);

		/* Skip slaves whose MAC did not change. */
		if (is_same_ether_addr(&slave_addr, &slave->actor.system))
			continue;

		ether_addr_copy(&slave_addr, &slave->actor.system);
		/* Do nothing if this port is not an aggregator. In other case
		 * set NTT flag on every port that uses this aggregator. */
		if (slave->aggregator_port_id != slave_id)
			continue;

		for (j = 0; j < internals->active_slave_count; j++) {
			agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
			if (agg_slave->aggregator_port_id == slave_id)
				SM_FLAG_SET(agg_slave, NTT);
		}
	}

	if (bond_dev->data->dev_started)
		bond_mode_8023ad_start(bond_dev);
}
1010 | ||
1011 | static void | |
1012 | bond_mode_8023ad_conf_get(struct rte_eth_dev *dev, | |
1013 | struct rte_eth_bond_8023ad_conf *conf) | |
1014 | { | |
1015 | struct bond_dev_private *internals = dev->data->dev_private; | |
1016 | struct mode8023ad_private *mode4 = &internals->mode4; | |
1017 | uint64_t ms_ticks = rte_get_tsc_hz() / 1000; | |
1018 | ||
1019 | conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks; | |
1020 | conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks; | |
1021 | conf->short_timeout_ms = mode4->short_timeout / ms_ticks; | |
1022 | conf->long_timeout_ms = mode4->long_timeout / ms_ticks; | |
1023 | conf->aggregate_wait_timeout_ms = mode4->aggregate_wait_timeout / ms_ticks; | |
1024 | conf->tx_period_ms = mode4->tx_period_timeout / ms_ticks; | |
1025 | conf->update_timeout_ms = mode4->update_timeout_us / 1000; | |
1026 | conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks; | |
1027 | } | |
1028 | ||
1029 | static void | |
1030 | bond_mode_8023ad_conf_get_v1607(struct rte_eth_dev *dev, | |
1031 | struct rte_eth_bond_8023ad_conf *conf) | |
1032 | { | |
1033 | struct bond_dev_private *internals = dev->data->dev_private; | |
1034 | struct mode8023ad_private *mode4 = &internals->mode4; | |
1035 | ||
1036 | bond_mode_8023ad_conf_get(dev, conf); | |
1037 | conf->slowrx_cb = mode4->slowrx_cb; | |
1038 | } | |
1039 | ||
/* Fill @conf with the compile-time default mode 4 timeouts (milliseconds)
 * and no external slow-rx callback. */
static void
bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
{
	conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
	conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
	conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
	conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
	conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
	conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
	conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
	conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
	conf->slowrx_cb = NULL;
}
1053 | ||
1054 | static void | |
1055 | bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4, | |
1056 | struct rte_eth_bond_8023ad_conf *conf) | |
1057 | { | |
1058 | uint64_t ms_ticks = rte_get_tsc_hz() / 1000; | |
1059 | ||
1060 | mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks; | |
1061 | mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks; | |
1062 | mode4->short_timeout = conf->short_timeout_ms * ms_ticks; | |
1063 | mode4->long_timeout = conf->long_timeout_ms * ms_ticks; | |
1064 | mode4->aggregate_wait_timeout = conf->aggregate_wait_timeout_ms * ms_ticks; | |
1065 | mode4->tx_period_timeout = conf->tx_period_ms * ms_ticks; | |
1066 | mode4->rx_marker_timeout = conf->rx_marker_period_ms * ms_ticks; | |
1067 | mode4->update_timeout_us = conf->update_timeout_ms * 1000; | |
1068 | } | |
1069 | ||
1070 | static void | |
1071 | bond_mode_8023ad_setup_v20(struct rte_eth_dev *dev, | |
1072 | struct rte_eth_bond_8023ad_conf *conf) | |
1073 | { | |
1074 | struct rte_eth_bond_8023ad_conf def_conf; | |
1075 | struct bond_dev_private *internals = dev->data->dev_private; | |
1076 | struct mode8023ad_private *mode4 = &internals->mode4; | |
1077 | ||
1078 | if (conf == NULL) { | |
1079 | conf = &def_conf; | |
1080 | bond_mode_8023ad_conf_get_default(conf); | |
1081 | } | |
1082 | ||
1083 | bond_mode_8023ad_stop(dev); | |
1084 | bond_mode_8023ad_conf_assign(mode4, conf); | |
1085 | ||
1086 | if (dev->data->dev_started) | |
1087 | bond_mode_8023ad_start(dev); | |
1088 | } | |
1089 | ||
1090 | ||
1091 | void | |
1092 | bond_mode_8023ad_setup(struct rte_eth_dev *dev, | |
1093 | struct rte_eth_bond_8023ad_conf *conf) | |
1094 | { | |
1095 | struct rte_eth_bond_8023ad_conf def_conf; | |
1096 | struct bond_dev_private *internals = dev->data->dev_private; | |
1097 | struct mode8023ad_private *mode4 = &internals->mode4; | |
1098 | ||
1099 | if (conf == NULL) { | |
1100 | conf = &def_conf; | |
1101 | bond_mode_8023ad_conf_get_default(conf); | |
1102 | } | |
1103 | ||
1104 | bond_mode_8023ad_stop(dev); | |
1105 | bond_mode_8023ad_conf_assign(mode4, conf); | |
1106 | mode4->slowrx_cb = conf->slowrx_cb; | |
1107 | ||
1108 | if (dev->data->dev_started) | |
1109 | bond_mode_8023ad_start(dev); | |
1110 | } | |
1111 | ||
1112 | int | |
1113 | bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev) | |
1114 | { | |
1115 | struct bond_dev_private *internals = bond_dev->data->dev_private; | |
1116 | uint8_t i; | |
1117 | ||
1118 | for (i = 0; i < internals->active_slave_count; i++) | |
1119 | bond_mode_8023ad_activate_slave(bond_dev, i); | |
1120 | ||
1121 | return 0; | |
1122 | } | |
1123 | ||
1124 | int | |
1125 | bond_mode_8023ad_start(struct rte_eth_dev *bond_dev) | |
1126 | { | |
1127 | struct bond_dev_private *internals = bond_dev->data->dev_private; | |
1128 | struct mode8023ad_private *mode4 = &internals->mode4; | |
1129 | static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000; | |
1130 | ||
1131 | if (mode4->slowrx_cb) | |
1132 | return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb, | |
1133 | bond_dev); | |
1134 | ||
1135 | return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev); | |
1136 | } | |
1137 | ||
1138 | void | |
1139 | bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev) | |
1140 | { | |
1141 | struct bond_dev_private *internals = bond_dev->data->dev_private; | |
1142 | struct mode8023ad_private *mode4 = &internals->mode4; | |
1143 | ||
1144 | if (mode4->slowrx_cb) { | |
1145 | rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb, | |
1146 | bond_dev); | |
1147 | return; | |
1148 | } | |
1149 | rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev); | |
1150 | } | |
1151 | ||
1152 | void | |
1153 | bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals, | |
1154 | uint8_t slave_id, struct rte_mbuf *pkt) | |
1155 | { | |
1156 | struct mode8023ad_private *mode4 = &internals->mode4; | |
1157 | struct port *port = &mode_8023ad_ports[slave_id]; | |
1158 | struct marker_header *m_hdr; | |
1159 | uint64_t marker_timer, old_marker_timer; | |
1160 | int retval; | |
1161 | uint8_t wrn, subtype; | |
1162 | /* If packet is a marker, we send response now by reusing given packet | |
1163 | * and update only source MAC, destination MAC is multicast so don't | |
1164 | * update it. Other frames will be handled later by state machines */ | |
1165 | subtype = rte_pktmbuf_mtod(pkt, | |
1166 | struct slow_protocol_frame *)->slow_protocol.subtype; | |
1167 | ||
1168 | if (subtype == SLOW_SUBTYPE_MARKER) { | |
1169 | m_hdr = rte_pktmbuf_mtod(pkt, struct marker_header *); | |
1170 | ||
1171 | if (likely(m_hdr->marker.tlv_type_marker != MARKER_TLV_TYPE_INFO)) { | |
1172 | wrn = WRN_UNKNOWN_MARKER_TYPE; | |
1173 | goto free_out; | |
1174 | } | |
1175 | ||
1176 | /* Setup marker timer. Do it in loop in case concurrent access. */ | |
1177 | do { | |
1178 | old_marker_timer = port->rx_marker_timer; | |
1179 | if (!timer_is_expired(&old_marker_timer)) { | |
1180 | wrn = WRN_RX_MARKER_TO_FAST; | |
1181 | goto free_out; | |
1182 | } | |
1183 | ||
1184 | timer_set(&marker_timer, mode4->rx_marker_timeout); | |
1185 | retval = rte_atomic64_cmpset(&port->rx_marker_timer, | |
1186 | old_marker_timer, marker_timer); | |
1187 | } while (unlikely(retval == 0)); | |
1188 | ||
1189 | m_hdr->marker.tlv_type_marker = MARKER_TLV_TYPE_RESP; | |
1190 | rte_eth_macaddr_get(slave_id, &m_hdr->eth_hdr.s_addr); | |
1191 | ||
1192 | if (unlikely(rte_ring_enqueue(port->tx_ring, pkt) == -ENOBUFS)) { | |
1193 | /* reset timer */ | |
1194 | port->rx_marker_timer = 0; | |
1195 | wrn = WRN_TX_QUEUE_FULL; | |
1196 | goto free_out; | |
1197 | } | |
1198 | } else if (likely(subtype == SLOW_SUBTYPE_LACP)) { | |
1199 | if (unlikely(rte_ring_enqueue(port->rx_ring, pkt) == -ENOBUFS)) { | |
1200 | /* If RX fing full free lacpdu message and drop packet */ | |
1201 | wrn = WRN_RX_QUEUE_FULL; | |
1202 | goto free_out; | |
1203 | } | |
1204 | } else { | |
1205 | wrn = WRN_UNKNOWN_SLOW_TYPE; | |
1206 | goto free_out; | |
1207 | } | |
1208 | ||
1209 | return; | |
1210 | ||
1211 | free_out: | |
1212 | set_warning_flags(port, wrn); | |
1213 | rte_pktmbuf_free(pkt); | |
1214 | } | |
1215 | ||
1216 | int | |
1217 | rte_eth_bond_8023ad_conf_get_v20(uint8_t port_id, | |
1218 | struct rte_eth_bond_8023ad_conf *conf) | |
1219 | { | |
1220 | struct rte_eth_dev *bond_dev; | |
1221 | ||
1222 | if (valid_bonded_port_id(port_id) != 0) | |
1223 | return -EINVAL; | |
1224 | ||
1225 | if (conf == NULL) | |
1226 | return -EINVAL; | |
1227 | ||
1228 | bond_dev = &rte_eth_devices[port_id]; | |
1229 | bond_mode_8023ad_conf_get(bond_dev, conf); | |
1230 | return 0; | |
1231 | } | |
1232 | VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v20, 2.0); | |
1233 | ||
1234 | int | |
1235 | rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id, | |
1236 | struct rte_eth_bond_8023ad_conf *conf) | |
1237 | { | |
1238 | struct rte_eth_dev *bond_dev; | |
1239 | ||
1240 | if (valid_bonded_port_id(port_id) != 0) | |
1241 | return -EINVAL; | |
1242 | ||
1243 | if (conf == NULL) | |
1244 | return -EINVAL; | |
1245 | ||
1246 | bond_dev = &rte_eth_devices[port_id]; | |
1247 | bond_mode_8023ad_conf_get_v1607(bond_dev, conf); | |
1248 | return 0; | |
1249 | } | |
1250 | BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07); | |
1251 | MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_conf_get(uint8_t port_id, | |
1252 | struct rte_eth_bond_8023ad_conf *conf), | |
1253 | rte_eth_bond_8023ad_conf_get_v1607); | |
1254 | ||
1255 | static int | |
1256 | bond_8023ad_setup_validate(uint8_t port_id, | |
1257 | struct rte_eth_bond_8023ad_conf *conf) | |
1258 | { | |
1259 | if (valid_bonded_port_id(port_id) != 0) | |
1260 | return -EINVAL; | |
1261 | ||
1262 | if (conf != NULL) { | |
1263 | /* Basic sanity check */ | |
1264 | if (conf->slow_periodic_ms == 0 || | |
1265 | conf->fast_periodic_ms >= conf->slow_periodic_ms || | |
1266 | conf->long_timeout_ms == 0 || | |
1267 | conf->short_timeout_ms >= conf->long_timeout_ms || | |
1268 | conf->aggregate_wait_timeout_ms == 0 || | |
1269 | conf->tx_period_ms == 0 || | |
1270 | conf->rx_marker_period_ms == 0 || | |
1271 | conf->update_timeout_ms == 0) { | |
1272 | RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n"); | |
1273 | return -EINVAL; | |
1274 | } | |
1275 | } | |
1276 | ||
1277 | return 0; | |
1278 | } | |
1279 | ||
1280 | int | |
1281 | rte_eth_bond_8023ad_setup_v20(uint8_t port_id, | |
1282 | struct rte_eth_bond_8023ad_conf *conf) | |
1283 | { | |
1284 | struct rte_eth_dev *bond_dev; | |
1285 | int err; | |
1286 | ||
1287 | err = bond_8023ad_setup_validate(port_id, conf); | |
1288 | if (err != 0) | |
1289 | return err; | |
1290 | ||
1291 | bond_dev = &rte_eth_devices[port_id]; | |
1292 | bond_mode_8023ad_setup_v20(bond_dev, conf); | |
1293 | ||
1294 | return 0; | |
1295 | } | |
1296 | VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v20, 2.0); | |
1297 | ||
1298 | int | |
1299 | rte_eth_bond_8023ad_setup_v1607(uint8_t port_id, | |
1300 | struct rte_eth_bond_8023ad_conf *conf) | |
1301 | { | |
1302 | struct rte_eth_dev *bond_dev; | |
1303 | int err; | |
1304 | ||
1305 | err = bond_8023ad_setup_validate(port_id, conf); | |
1306 | if (err != 0) | |
1307 | return err; | |
1308 | ||
1309 | bond_dev = &rte_eth_devices[port_id]; | |
1310 | bond_mode_8023ad_setup(bond_dev, conf); | |
1311 | ||
1312 | return 0; | |
1313 | } | |
1314 | BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07); | |
1315 | MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_setup(uint8_t port_id, | |
1316 | struct rte_eth_bond_8023ad_conf *conf), | |
1317 | rte_eth_bond_8023ad_setup_v1607); | |
1318 | ||
1319 | int | |
1320 | rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id, | |
1321 | struct rte_eth_bond_8023ad_slave_info *info) | |
1322 | { | |
1323 | struct rte_eth_dev *bond_dev; | |
1324 | struct bond_dev_private *internals; | |
1325 | struct port *port; | |
1326 | ||
1327 | if (info == NULL || valid_bonded_port_id(port_id) != 0 || | |
1328 | rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD) | |
1329 | return -EINVAL; | |
1330 | ||
1331 | bond_dev = &rte_eth_devices[port_id]; | |
1332 | ||
1333 | internals = bond_dev->data->dev_private; | |
1334 | if (find_slave_by_id(internals->active_slaves, | |
1335 | internals->active_slave_count, slave_id) == | |
1336 | internals->active_slave_count) | |
1337 | return -EINVAL; | |
1338 | ||
1339 | port = &mode_8023ad_ports[slave_id]; | |
1340 | info->selected = port->selected; | |
1341 | ||
1342 | info->actor_state = port->actor_state; | |
1343 | rte_memcpy(&info->actor, &port->actor, sizeof(port->actor)); | |
1344 | ||
1345 | info->partner_state = port->partner_state; | |
1346 | rte_memcpy(&info->partner, &port->partner, sizeof(port->partner)); | |
1347 | ||
1348 | info->agg_port_id = port->aggregator_port_id; | |
1349 | return 0; | |
1350 | } | |
1351 | ||
1352 | static int | |
1353 | bond_8023ad_ext_validate(uint8_t port_id, uint8_t slave_id) | |
1354 | { | |
1355 | struct rte_eth_dev *bond_dev; | |
1356 | struct bond_dev_private *internals; | |
1357 | struct mode8023ad_private *mode4; | |
1358 | ||
1359 | if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD) | |
1360 | return -EINVAL; | |
1361 | ||
1362 | bond_dev = &rte_eth_devices[port_id]; | |
1363 | ||
1364 | if (!bond_dev->data->dev_started) | |
1365 | return -EINVAL; | |
1366 | ||
1367 | internals = bond_dev->data->dev_private; | |
1368 | if (find_slave_by_id(internals->active_slaves, | |
1369 | internals->active_slave_count, slave_id) == | |
1370 | internals->active_slave_count) | |
1371 | return -EINVAL; | |
1372 | ||
1373 | mode4 = &internals->mode4; | |
1374 | if (mode4->slowrx_cb == NULL) | |
1375 | return -EINVAL; | |
1376 | ||
1377 | return 0; | |
1378 | } | |
1379 | ||
1380 | int | |
1381 | rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled) | |
1382 | { | |
1383 | struct port *port; | |
1384 | int res; | |
1385 | ||
1386 | res = bond_8023ad_ext_validate(port_id, slave_id); | |
1387 | if (res != 0) | |
1388 | return res; | |
1389 | ||
1390 | port = &mode_8023ad_ports[slave_id]; | |
1391 | ||
1392 | if (enabled) | |
1393 | ACTOR_STATE_SET(port, COLLECTING); | |
1394 | else | |
1395 | ACTOR_STATE_CLR(port, COLLECTING); | |
1396 | ||
1397 | return 0; | |
1398 | } | |
1399 | ||
1400 | int | |
1401 | rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled) | |
1402 | { | |
1403 | struct port *port; | |
1404 | int res; | |
1405 | ||
1406 | res = bond_8023ad_ext_validate(port_id, slave_id); | |
1407 | if (res != 0) | |
1408 | return res; | |
1409 | ||
1410 | port = &mode_8023ad_ports[slave_id]; | |
1411 | ||
1412 | if (enabled) | |
1413 | ACTOR_STATE_SET(port, DISTRIBUTING); | |
1414 | else | |
1415 | ACTOR_STATE_CLR(port, DISTRIBUTING); | |
1416 | ||
1417 | return 0; | |
1418 | } | |
1419 | ||
1420 | int | |
1421 | rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id) | |
1422 | { | |
1423 | struct port *port; | |
1424 | int err; | |
1425 | ||
1426 | err = bond_8023ad_ext_validate(port_id, slave_id); | |
1427 | if (err != 0) | |
1428 | return err; | |
1429 | ||
1430 | port = &mode_8023ad_ports[slave_id]; | |
1431 | return ACTOR_STATE(port, DISTRIBUTING); | |
1432 | } | |
1433 | ||
1434 | int | |
1435 | rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id) | |
1436 | { | |
1437 | struct port *port; | |
1438 | int err; | |
1439 | ||
1440 | err = bond_8023ad_ext_validate(port_id, slave_id); | |
1441 | if (err != 0) | |
1442 | return err; | |
1443 | ||
1444 | port = &mode_8023ad_ports[slave_id]; | |
1445 | return ACTOR_STATE(port, COLLECTING); | |
1446 | } | |
1447 | ||
1448 | int | |
1449 | rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id, | |
1450 | struct rte_mbuf *lacp_pkt) | |
1451 | { | |
1452 | struct port *port; | |
1453 | int res; | |
1454 | ||
1455 | res = bond_8023ad_ext_validate(port_id, slave_id); | |
1456 | if (res != 0) | |
1457 | return res; | |
1458 | ||
1459 | port = &mode_8023ad_ports[slave_id]; | |
1460 | ||
1461 | if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header)) | |
1462 | return -EINVAL; | |
1463 | ||
1464 | struct lacpdu_header *lacp; | |
1465 | ||
1466 | /* only enqueue LACPDUs */ | |
1467 | lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *); | |
1468 | if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP) | |
1469 | return -EINVAL; | |
1470 | ||
1471 | MODE4_DEBUG("sending LACP frame\n"); | |
1472 | ||
1473 | return rte_ring_enqueue(port->tx_ring, lacp_pkt); | |
1474 | } | |
1475 | ||
1476 | static void | |
1477 | bond_mode_8023ad_ext_periodic_cb(void *arg) | |
1478 | { | |
1479 | struct rte_eth_dev *bond_dev = arg; | |
1480 | struct bond_dev_private *internals = bond_dev->data->dev_private; | |
1481 | struct mode8023ad_private *mode4 = &internals->mode4; | |
1482 | struct port *port; | |
1483 | void *pkt = NULL; | |
1484 | uint16_t i, slave_id; | |
1485 | ||
1486 | for (i = 0; i < internals->active_slave_count; i++) { | |
1487 | slave_id = internals->active_slaves[i]; | |
1488 | port = &mode_8023ad_ports[slave_id]; | |
1489 | ||
1490 | if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) { | |
1491 | struct rte_mbuf *lacp_pkt = pkt; | |
1492 | struct lacpdu_header *lacp; | |
1493 | ||
1494 | lacp = rte_pktmbuf_mtod(lacp_pkt, | |
1495 | struct lacpdu_header *); | |
1496 | RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP); | |
1497 | ||
1498 | /* This is LACP frame so pass it to rx callback. | |
1499 | * Callback is responsible for freeing mbuf. | |
1500 | */ | |
1501 | mode4->slowrx_cb(slave_id, lacp_pkt); | |
1502 | } | |
1503 | } | |
1504 | ||
1505 | rte_eal_alarm_set(internals->mode4.update_timeout_us, | |
1506 | bond_mode_8023ad_ext_periodic_cb, arg); | |
1507 | } |