]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * net/tipc/link.c: TIPC link code | |
3 | * | |
4 | * Copyright (c) 1996-2007, 2012-2015, Ericsson AB | |
5 | * Copyright (c) 2004-2007, 2010-2013, Wind River Systems | |
6 | * All rights reserved. | |
7 | * | |
8 | * Redistribution and use in source and binary forms, with or without | |
9 | * modification, are permitted provided that the following conditions are met: | |
10 | * | |
11 | * 1. Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | |
16 | * 3. Neither the names of the copyright holders nor the names of its | |
17 | * contributors may be used to endorse or promote products derived from | |
18 | * this software without specific prior written permission. | |
19 | * | |
20 | * Alternatively, this software may be distributed under the terms of the | |
21 | * GNU General Public License ("GPL") version 2 as published by the Free | |
22 | * Software Foundation. | |
23 | * | |
24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | |
28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
34 | * POSSIBILITY OF SUCH DAMAGE. | |
35 | */ | |
36 | ||
37 | #include "core.h" | |
38 | #include "subscr.h" | |
39 | #include "link.h" | |
40 | #include "bcast.h" | |
41 | #include "socket.h" | |
42 | #include "name_distr.h" | |
43 | #include "discover.h" | |
44 | #include "netlink.h" | |
45 | ||
46 | #include <linux/pkt_sched.h> | |
47 | ||
/*
 * Error message prefixes used when logging link failures
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
/* Fixed name used for the single broadcast link endpoint */
static const char tipc_bclink_name[] = "broadcast-link";
54 | ||
55 | static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { | |
56 | [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC }, | |
57 | [TIPC_NLA_LINK_NAME] = { | |
58 | .type = NLA_STRING, | |
59 | .len = TIPC_MAX_LINK_NAME | |
60 | }, | |
61 | [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 }, | |
62 | [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG }, | |
63 | [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG }, | |
64 | [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG }, | |
65 | [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED }, | |
66 | [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED }, | |
67 | [TIPC_NLA_LINK_RX] = { .type = NLA_U32 }, | |
68 | [TIPC_NLA_LINK_TX] = { .type = NLA_U32 } | |
69 | }; | |
70 | ||
71 | /* Properties valid for media, bearar and link */ | |
72 | static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = { | |
73 | [TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC }, | |
74 | [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 }, | |
75 | [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 }, | |
76 | [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 } | |
77 | }; | |
78 | ||
/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
/*
 * Out-of-range value for link session numbers. A real peer session can
 * never equal this value, so it acts as an "accept any session" wildcard
 * while the link is down (see tipc_link_reset()).
 */
#define WILDCARD_SESSION 0x10000
87 | ||
/* Link FSM states:
 *
 * Each state uses a distinct, non-overlapping bit pattern, so a state
 * word can be tested against several states at once with a bitwise AND
 * (see link_is_up() and tipc_link_is_reset() below).
 */
enum {
	LINK_ESTABLISHED  = 0xe,
	LINK_ESTABLISHING = 0xe << 4,
	LINK_RESET        = 0x1 << 8,
	LINK_RESETTING    = 0x2 << 12,
	LINK_PEER_RESET   = 0xd << 16,
	LINK_FAILINGOVER  = 0xf << 20,
	LINK_SYNCHING     = 0xc << 24
};
99 | ||
100 | /* Link FSM state checking routines | |
101 | */ | |
102 | static int link_is_up(struct tipc_link *l) | |
103 | { | |
104 | return l->state & (LINK_ESTABLISHED | LINK_SYNCHING); | |
105 | } | |
106 | ||
107 | static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, | |
108 | struct sk_buff_head *xmitq); | |
109 | static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, | |
110 | u16 rcvgap, int tolerance, int priority, | |
111 | struct sk_buff_head *xmitq); | |
112 | static void link_reset_statistics(struct tipc_link *l_ptr); | |
113 | static void link_print(struct tipc_link *l_ptr, const char *str); | |
114 | static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); | |
115 | ||
116 | /* | |
117 | * Simple non-static link routines (i.e. referenced outside this file) | |
118 | */ | |
119 | bool tipc_link_is_up(struct tipc_link *l) | |
120 | { | |
121 | return link_is_up(l); | |
122 | } | |
123 | ||
124 | bool tipc_link_peer_is_down(struct tipc_link *l) | |
125 | { | |
126 | return l->state == LINK_PEER_RESET; | |
127 | } | |
128 | ||
129 | bool tipc_link_is_reset(struct tipc_link *l) | |
130 | { | |
131 | return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING); | |
132 | } | |
133 | ||
134 | bool tipc_link_is_establishing(struct tipc_link *l) | |
135 | { | |
136 | return l->state == LINK_ESTABLISHING; | |
137 | } | |
138 | ||
139 | bool tipc_link_is_synching(struct tipc_link *l) | |
140 | { | |
141 | return l->state == LINK_SYNCHING; | |
142 | } | |
143 | ||
144 | bool tipc_link_is_failingover(struct tipc_link *l) | |
145 | { | |
146 | return l->state == LINK_FAILINGOVER; | |
147 | } | |
148 | ||
149 | bool tipc_link_is_blocked(struct tipc_link *l) | |
150 | { | |
151 | return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER); | |
152 | } | |
153 | ||
154 | int tipc_link_is_active(struct tipc_link *l) | |
155 | { | |
156 | struct tipc_node *n = l->owner; | |
157 | ||
158 | return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l); | |
159 | } | |
160 | ||
161 | void tipc_link_add_bc_peer(struct tipc_link *l) | |
162 | { | |
163 | l->ackers++; | |
164 | } | |
165 | ||
166 | void tipc_link_remove_bc_peer(struct tipc_link *l) | |
167 | { | |
168 | l->ackers--; | |
169 | } | |
170 | ||
171 | int tipc_link_bc_peers(struct tipc_link *l) | |
172 | { | |
173 | return l->ackers; | |
174 | } | |
175 | ||
176 | static u32 link_own_addr(struct tipc_link *l) | |
177 | { | |
178 | return msg_prevnode(l->pmsg); | |
179 | } | |
180 | ||
181 | /** | |
182 | * tipc_link_create - create a new link | |
183 | * @n: pointer to associated node | |
184 | * @if_name: associated interface name | |
185 | * @bearer_id: id (index) of associated bearer | |
186 | * @tolerance: link tolerance to be used by link | |
187 | * @net_plane: network plane (A,B,c..) this link belongs to | |
188 | * @mtu: mtu to be advertised by link | |
189 | * @priority: priority to be used by link | |
190 | * @window: send window to be used by link | |
191 | * @session: session to be used by link | |
192 | * @ownnode: identity of own node | |
193 | * @peer: node id of peer node | |
194 | * @maddr: media address to be used | |
195 | * @inputq: queue to put messages ready for delivery | |
196 | * @namedq: queue to put binding table update messages ready for delivery | |
197 | * @link: return value, pointer to put the created link | |
198 | * | |
199 | * Returns true if link was created, otherwise false | |
200 | */ | |
201 | bool tipc_link_create(struct tipc_node *n, char *if_name, int bearer_id, | |
202 | int tolerance, char net_plane, u32 mtu, int priority, | |
203 | int window, u32 session, u32 ownnode, u32 peer, | |
204 | struct tipc_media_addr *maddr, | |
205 | struct sk_buff_head *inputq, struct sk_buff_head *namedq, | |
206 | struct tipc_link **link) | |
207 | { | |
208 | struct tipc_link *l; | |
209 | struct tipc_msg *hdr; | |
210 | ||
211 | l = kzalloc(sizeof(*l), GFP_ATOMIC); | |
212 | if (!l) | |
213 | return false; | |
214 | *link = l; | |
215 | l->pmsg = (struct tipc_msg *)&l->proto_msg; | |
216 | hdr = l->pmsg; | |
217 | tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer); | |
218 | msg_set_size(hdr, sizeof(l->proto_msg)); | |
219 | msg_set_session(hdr, session); | |
220 | msg_set_bearer_id(hdr, l->bearer_id); | |
221 | ||
222 | /* Note: peer i/f name is completed by reset/activate message */ | |
223 | sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown", | |
224 | tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode), | |
225 | if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer)); | |
226 | strcpy((char *)msg_data(hdr), if_name); | |
227 | ||
228 | l->addr = peer; | |
229 | l->media_addr = maddr; | |
230 | l->owner = n; | |
231 | l->peer_session = WILDCARD_SESSION; | |
232 | l->bearer_id = bearer_id; | |
233 | l->tolerance = tolerance; | |
234 | l->net_plane = net_plane; | |
235 | l->advertised_mtu = mtu; | |
236 | l->mtu = mtu; | |
237 | l->priority = priority; | |
238 | tipc_link_set_queue_limits(l, window); | |
239 | l->ackers = 1; | |
240 | l->inputq = inputq; | |
241 | l->namedq = namedq; | |
242 | l->state = LINK_RESETTING; | |
243 | __skb_queue_head_init(&l->transmq); | |
244 | __skb_queue_head_init(&l->backlogq); | |
245 | __skb_queue_head_init(&l->deferdq); | |
246 | skb_queue_head_init(&l->wakeupq); | |
247 | skb_queue_head_init(l->inputq); | |
248 | return true; | |
249 | } | |
250 | ||
251 | /** | |
252 | * tipc_link_bc_create - create new link to be used for broadcast | |
253 | * @n: pointer to associated node | |
254 | * @mtu: mtu to be used | |
255 | * @window: send window to be used | |
256 | * @inputq: queue to put messages ready for delivery | |
257 | * @namedq: queue to put binding table update messages ready for delivery | |
258 | * @link: return value, pointer to put the created link | |
259 | * | |
260 | * Returns true if link was created, otherwise false | |
261 | */ | |
262 | bool tipc_link_bc_create(struct tipc_node *n, int mtu, int window, | |
263 | struct sk_buff_head *inputq, | |
264 | struct sk_buff_head *namedq, | |
265 | struct tipc_link **link) | |
266 | { | |
267 | struct tipc_link *l; | |
268 | ||
269 | if (!tipc_link_create(n, "", MAX_BEARERS, 0, 'Z', mtu, 0, window, | |
270 | 0, 0, 0, NULL, inputq, namedq, link)) | |
271 | return false; | |
272 | ||
273 | l = *link; | |
274 | strcpy(l->name, tipc_bclink_name); | |
275 | tipc_link_reset(l); | |
276 | l->ackers = 0; | |
277 | return true; | |
278 | } | |
279 | ||
280 | /* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints. | |
281 | * | |
282 | * Give a newly added peer node the sequence number where it should | |
283 | * start receiving and acking broadcast packets. | |
284 | */ | |
285 | void tipc_link_build_bcast_sync_msg(struct tipc_link *l, | |
286 | struct sk_buff_head *xmitq) | |
287 | { | |
288 | struct sk_buff *skb; | |
289 | struct sk_buff_head list; | |
290 | u16 last_sent; | |
291 | ||
292 | skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, | |
293 | 0, l->addr, link_own_addr(l), 0, 0, 0); | |
294 | if (!skb) | |
295 | return; | |
296 | last_sent = tipc_bclink_get_last_sent(l->owner->net); | |
297 | msg_set_last_bcast(buf_msg(skb), last_sent); | |
298 | __skb_queue_head_init(&list); | |
299 | __skb_queue_tail(&list, skb); | |
300 | tipc_link_xmit(l, &list, xmitq); | |
301 | } | |
302 | ||
303 | /** | |
304 | * tipc_link_fsm_evt - link finite state machine | |
305 | * @l: pointer to link | |
306 | * @evt: state machine event to be processed | |
307 | */ | |
308 | int tipc_link_fsm_evt(struct tipc_link *l, int evt) | |
309 | { | |
310 | int rc = 0; | |
311 | ||
312 | switch (l->state) { | |
313 | case LINK_RESETTING: | |
314 | switch (evt) { | |
315 | case LINK_PEER_RESET_EVT: | |
316 | l->state = LINK_PEER_RESET; | |
317 | break; | |
318 | case LINK_RESET_EVT: | |
319 | l->state = LINK_RESET; | |
320 | break; | |
321 | case LINK_FAILURE_EVT: | |
322 | case LINK_FAILOVER_BEGIN_EVT: | |
323 | case LINK_ESTABLISH_EVT: | |
324 | case LINK_FAILOVER_END_EVT: | |
325 | case LINK_SYNCH_BEGIN_EVT: | |
326 | case LINK_SYNCH_END_EVT: | |
327 | default: | |
328 | goto illegal_evt; | |
329 | } | |
330 | break; | |
331 | case LINK_RESET: | |
332 | switch (evt) { | |
333 | case LINK_PEER_RESET_EVT: | |
334 | l->state = LINK_ESTABLISHING; | |
335 | break; | |
336 | case LINK_FAILOVER_BEGIN_EVT: | |
337 | l->state = LINK_FAILINGOVER; | |
338 | case LINK_FAILURE_EVT: | |
339 | case LINK_RESET_EVT: | |
340 | case LINK_ESTABLISH_EVT: | |
341 | case LINK_FAILOVER_END_EVT: | |
342 | break; | |
343 | case LINK_SYNCH_BEGIN_EVT: | |
344 | case LINK_SYNCH_END_EVT: | |
345 | default: | |
346 | goto illegal_evt; | |
347 | } | |
348 | break; | |
349 | case LINK_PEER_RESET: | |
350 | switch (evt) { | |
351 | case LINK_RESET_EVT: | |
352 | l->state = LINK_ESTABLISHING; | |
353 | break; | |
354 | case LINK_PEER_RESET_EVT: | |
355 | case LINK_ESTABLISH_EVT: | |
356 | case LINK_FAILURE_EVT: | |
357 | break; | |
358 | case LINK_SYNCH_BEGIN_EVT: | |
359 | case LINK_SYNCH_END_EVT: | |
360 | case LINK_FAILOVER_BEGIN_EVT: | |
361 | case LINK_FAILOVER_END_EVT: | |
362 | default: | |
363 | goto illegal_evt; | |
364 | } | |
365 | break; | |
366 | case LINK_FAILINGOVER: | |
367 | switch (evt) { | |
368 | case LINK_FAILOVER_END_EVT: | |
369 | l->state = LINK_RESET; | |
370 | break; | |
371 | case LINK_PEER_RESET_EVT: | |
372 | case LINK_RESET_EVT: | |
373 | case LINK_ESTABLISH_EVT: | |
374 | case LINK_FAILURE_EVT: | |
375 | break; | |
376 | case LINK_FAILOVER_BEGIN_EVT: | |
377 | case LINK_SYNCH_BEGIN_EVT: | |
378 | case LINK_SYNCH_END_EVT: | |
379 | default: | |
380 | goto illegal_evt; | |
381 | } | |
382 | break; | |
383 | case LINK_ESTABLISHING: | |
384 | switch (evt) { | |
385 | case LINK_ESTABLISH_EVT: | |
386 | l->state = LINK_ESTABLISHED; | |
387 | break; | |
388 | case LINK_FAILOVER_BEGIN_EVT: | |
389 | l->state = LINK_FAILINGOVER; | |
390 | break; | |
391 | case LINK_RESET_EVT: | |
392 | l->state = LINK_RESET; | |
393 | break; | |
394 | case LINK_FAILURE_EVT: | |
395 | case LINK_PEER_RESET_EVT: | |
396 | case LINK_SYNCH_BEGIN_EVT: | |
397 | case LINK_FAILOVER_END_EVT: | |
398 | break; | |
399 | case LINK_SYNCH_END_EVT: | |
400 | default: | |
401 | goto illegal_evt; | |
402 | } | |
403 | break; | |
404 | case LINK_ESTABLISHED: | |
405 | switch (evt) { | |
406 | case LINK_PEER_RESET_EVT: | |
407 | l->state = LINK_PEER_RESET; | |
408 | rc |= TIPC_LINK_DOWN_EVT; | |
409 | break; | |
410 | case LINK_FAILURE_EVT: | |
411 | l->state = LINK_RESETTING; | |
412 | rc |= TIPC_LINK_DOWN_EVT; | |
413 | break; | |
414 | case LINK_RESET_EVT: | |
415 | l->state = LINK_RESET; | |
416 | break; | |
417 | case LINK_ESTABLISH_EVT: | |
418 | case LINK_SYNCH_END_EVT: | |
419 | break; | |
420 | case LINK_SYNCH_BEGIN_EVT: | |
421 | l->state = LINK_SYNCHING; | |
422 | break; | |
423 | case LINK_FAILOVER_BEGIN_EVT: | |
424 | case LINK_FAILOVER_END_EVT: | |
425 | default: | |
426 | goto illegal_evt; | |
427 | } | |
428 | break; | |
429 | case LINK_SYNCHING: | |
430 | switch (evt) { | |
431 | case LINK_PEER_RESET_EVT: | |
432 | l->state = LINK_PEER_RESET; | |
433 | rc |= TIPC_LINK_DOWN_EVT; | |
434 | break; | |
435 | case LINK_FAILURE_EVT: | |
436 | l->state = LINK_RESETTING; | |
437 | rc |= TIPC_LINK_DOWN_EVT; | |
438 | break; | |
439 | case LINK_RESET_EVT: | |
440 | l->state = LINK_RESET; | |
441 | break; | |
442 | case LINK_ESTABLISH_EVT: | |
443 | case LINK_SYNCH_BEGIN_EVT: | |
444 | break; | |
445 | case LINK_SYNCH_END_EVT: | |
446 | l->state = LINK_ESTABLISHED; | |
447 | break; | |
448 | case LINK_FAILOVER_BEGIN_EVT: | |
449 | case LINK_FAILOVER_END_EVT: | |
450 | default: | |
451 | goto illegal_evt; | |
452 | } | |
453 | break; | |
454 | default: | |
455 | pr_err("Unknown FSM state %x in %s\n", l->state, l->name); | |
456 | } | |
457 | return rc; | |
458 | illegal_evt: | |
459 | pr_err("Illegal FSM event %x in state %x on link %s\n", | |
460 | evt, l->state, l->name); | |
461 | return rc; | |
462 | } | |
463 | ||
464 | /* link_profile_stats - update statistical profiling of traffic | |
465 | */ | |
466 | static void link_profile_stats(struct tipc_link *l) | |
467 | { | |
468 | struct sk_buff *skb; | |
469 | struct tipc_msg *msg; | |
470 | int length; | |
471 | ||
472 | /* Update counters used in statistical profiling of send traffic */ | |
473 | l->stats.accu_queue_sz += skb_queue_len(&l->transmq); | |
474 | l->stats.queue_sz_counts++; | |
475 | ||
476 | skb = skb_peek(&l->transmq); | |
477 | if (!skb) | |
478 | return; | |
479 | msg = buf_msg(skb); | |
480 | length = msg_size(msg); | |
481 | ||
482 | if (msg_user(msg) == MSG_FRAGMENTER) { | |
483 | if (msg_type(msg) != FIRST_FRAGMENT) | |
484 | return; | |
485 | length = msg_size(msg_get_wrapped(msg)); | |
486 | } | |
487 | l->stats.msg_lengths_total += length; | |
488 | l->stats.msg_length_counts++; | |
489 | if (length <= 64) | |
490 | l->stats.msg_length_profile[0]++; | |
491 | else if (length <= 256) | |
492 | l->stats.msg_length_profile[1]++; | |
493 | else if (length <= 1024) | |
494 | l->stats.msg_length_profile[2]++; | |
495 | else if (length <= 4096) | |
496 | l->stats.msg_length_profile[3]++; | |
497 | else if (length <= 16384) | |
498 | l->stats.msg_length_profile[4]++; | |
499 | else if (length <= 32768) | |
500 | l->stats.msg_length_profile[5]++; | |
501 | else | |
502 | l->stats.msg_length_profile[6]++; | |
503 | } | |
504 | ||
505 | /* tipc_link_timeout - perform periodic task as instructed from node timeout | |
506 | */ | |
507 | int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) | |
508 | { | |
509 | int rc = 0; | |
510 | int mtyp = STATE_MSG; | |
511 | bool xmit = false; | |
512 | bool prb = false; | |
513 | ||
514 | link_profile_stats(l); | |
515 | ||
516 | switch (l->state) { | |
517 | case LINK_ESTABLISHED: | |
518 | case LINK_SYNCHING: | |
519 | if (!l->silent_intv_cnt) { | |
520 | if (tipc_bclink_acks_missing(l->owner)) | |
521 | xmit = true; | |
522 | } else if (l->silent_intv_cnt <= l->abort_limit) { | |
523 | xmit = true; | |
524 | prb = true; | |
525 | } else { | |
526 | rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT); | |
527 | } | |
528 | l->silent_intv_cnt++; | |
529 | break; | |
530 | case LINK_RESET: | |
531 | xmit = true; | |
532 | mtyp = RESET_MSG; | |
533 | break; | |
534 | case LINK_ESTABLISHING: | |
535 | xmit = true; | |
536 | mtyp = ACTIVATE_MSG; | |
537 | break; | |
538 | case LINK_PEER_RESET: | |
539 | case LINK_RESETTING: | |
540 | case LINK_FAILINGOVER: | |
541 | break; | |
542 | default: | |
543 | break; | |
544 | } | |
545 | ||
546 | if (xmit) | |
547 | tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq); | |
548 | ||
549 | return rc; | |
550 | } | |
551 | ||
552 | /** | |
553 | * link_schedule_user - schedule a message sender for wakeup after congestion | |
554 | * @link: congested link | |
555 | * @list: message that was attempted sent | |
556 | * Create pseudo msg to send back to user when congestion abates | |
557 | * Does not consume buffer list | |
558 | */ | |
559 | static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list) | |
560 | { | |
561 | struct tipc_msg *msg = buf_msg(skb_peek(list)); | |
562 | int imp = msg_importance(msg); | |
563 | u32 oport = msg_origport(msg); | |
564 | u32 addr = link_own_addr(link); | |
565 | struct sk_buff *skb; | |
566 | ||
567 | /* This really cannot happen... */ | |
568 | if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { | |
569 | pr_warn("%s<%s>, send queue full", link_rst_msg, link->name); | |
570 | return -ENOBUFS; | |
571 | } | |
572 | /* Non-blocking sender: */ | |
573 | if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending) | |
574 | return -ELINKCONG; | |
575 | ||
576 | /* Create and schedule wakeup pseudo message */ | |
577 | skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, | |
578 | addr, addr, oport, 0, 0); | |
579 | if (!skb) | |
580 | return -ENOBUFS; | |
581 | TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list); | |
582 | TIPC_SKB_CB(skb)->chain_imp = imp; | |
583 | skb_queue_tail(&link->wakeupq, skb); | |
584 | link->stats.link_congs++; | |
585 | return -ELINKCONG; | |
586 | } | |
587 | ||
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	/* Pending chain sizes accumulated per importance level */
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		/* Stop at the first sender whose chain would overflow
		 * its importance level again; later entries stay queued
		 */
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}
610 | ||
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 *
 * Drops any partially reassembled message and clears the pointer so a
 * fresh reassembly can start after reset.
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}
620 | ||
621 | void tipc_link_purge_backlog(struct tipc_link *l) | |
622 | { | |
623 | __skb_queue_purge(&l->backlogq); | |
624 | l->backlog[TIPC_LOW_IMPORTANCE].len = 0; | |
625 | l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0; | |
626 | l->backlog[TIPC_HIGH_IMPORTANCE].len = 0; | |
627 | l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0; | |
628 | l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0; | |
629 | } | |
630 | ||
/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 *
 * Drops deferred and in-flight packets, then the backlog and any
 * partially reassembled message.
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}
642 | ||
/* tipc_link_reset - bring the link endpoint back to its initial state,
 * dropping all queued traffic and resetting sequence bookkeeping
 */
void tipc_link_reset(struct tipc_link *l)
{
	/* Link is down, accept any session */
	l->peer_session = WILDCARD_SESSION;

	/* If peer is up, it only accepts an incremented session number */
	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);

	/* Prepare for renewed mtu size negotiation */
	l->mtu = l->advertised_mtu;

	/* Clean up all queues: */
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	/* Move pending wakeup messages to the input queue so that blocked
	 * senders get notified instead of waiting forever
	 */
	skb_queue_splice_init(&l->wakeupq, l->inputq);

	tipc_link_purge_backlog(l);
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	/* Restart send/receive sequence numbering and failure tracking */
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->stats.recv_info = 0;
	l->stale_count = 0;
	link_reset_statistics(l);
}
673 | ||
/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @net: network namespace the link belongs to
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code,
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		/* Send window open: transmit directly */
		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		/* Window closed: try to append to the last backlog bundle */
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		/* Otherwise, try to start a new bundle on the backlog */
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		/* Not bundlable: move the whole remainder to the backlog */
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	link->snd_nxt = seqno;
	return 0;
}
741 | ||
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_last_in = l->owner->bclink.last_in;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			/* Window open: keep the original in transmq for
			 * possible retransmission, hand a clone to xmitq
			 */
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		/* Window closed: try to append to the last backlog bundle */
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		/* Otherwise, try to start a new bundle on the backlog */
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		/* Not bundlable: move the whole remainder to the backlog */
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}
814 | ||
815 | /* | |
816 | * tipc_link_sync_rcv - synchronize broadcast link endpoints. | |
817 | * Receive the sequence number where we should start receiving and | |
818 | * acking broadcast packets from a newly added peer node, and open | |
819 | * up for reception of such packets. | |
820 | * | |
821 | * Called with node locked | |
822 | */ | |
823 | static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf) | |
824 | { | |
825 | struct tipc_msg *msg = buf_msg(buf); | |
826 | ||
827 | n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg); | |
828 | n->bclink.recv_permitted = true; | |
829 | kfree_skb(buf); | |
830 | } | |
831 | ||
832 | /* | |
833 | * tipc_link_push_packets - push unsent packets to bearer | |
834 | * | |
835 | * Push out the unsent messages of a link where congestion | |
836 | * has abated. Node is locked. | |
837 | * | |
838 | * Called with node locked | |
839 | */ | |
840 | void tipc_link_push_packets(struct tipc_link *link) | |
841 | { | |
842 | struct sk_buff *skb; | |
843 | struct tipc_msg *msg; | |
844 | u16 seqno = link->snd_nxt; | |
845 | u16 ack = mod(link->rcv_nxt - 1); | |
846 | ||
847 | while (skb_queue_len(&link->transmq) < link->window) { | |
848 | skb = __skb_dequeue(&link->backlogq); | |
849 | if (!skb) | |
850 | break; | |
851 | TIPC_SKB_CB(skb)->ackers = link->ackers; | |
852 | msg = buf_msg(skb); | |
853 | link->backlog[msg_importance(msg)].len--; | |
854 | msg_set_ack(msg, ack); | |
855 | msg_set_seqno(msg, seqno); | |
856 | seqno = mod(seqno + 1); | |
857 | msg_set_bcast_ack(msg, link->owner->bclink.last_in); | |
858 | link->rcv_unacked = 0; | |
859 | __skb_queue_tail(&link->transmq, skb); | |
860 | tipc_bearer_send(link->owner->net, link->bearer_id, | |
861 | skb, link->media_addr); | |
862 | } | |
863 | link->snd_nxt = seqno; | |
864 | } | |
865 | ||
/* tipc_link_advance_backlog - move backlogged packets into the send window,
 * queueing clones on @xmitq for the caller to transmit
 */
void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		/* Clone for transmission; original stays in transmq for
		 * possible retransmission. On clone failure, leave the
		 * packet on the backlog and try again later.
		 */
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		/* Stamp current ack/sequence state into the header */
		msg_set_ack(hdr, ack);
		msg_set_seqno(hdr, seqno);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}
894 | ||
895 | static void link_retransmit_failure(struct tipc_link *l_ptr, | |
896 | struct sk_buff *buf) | |
897 | { | |
898 | struct tipc_msg *msg = buf_msg(buf); | |
899 | struct net *net = l_ptr->owner->net; | |
900 | ||
901 | pr_warn("Retransmission failure on link <%s>\n", l_ptr->name); | |
902 | ||
903 | if (l_ptr->addr) { | |
904 | /* Handle failure on standard link */ | |
905 | link_print(l_ptr, "Resetting link "); | |
906 | pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n", | |
907 | msg_user(msg), msg_type(msg), msg_size(msg), | |
908 | msg_errcode(msg)); | |
909 | pr_info("sqno %u, prev: %x, src: %x\n", | |
910 | msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg)); | |
911 | } else { | |
912 | /* Handle failure on broadcast link */ | |
913 | struct tipc_node *n_ptr; | |
914 | char addr_string[16]; | |
915 | ||
916 | pr_info("Msg seq number: %u, ", msg_seqno(msg)); | |
917 | pr_cont("Outstanding acks: %u\n", TIPC_SKB_CB(buf)->ackers); | |
918 | ||
919 | n_ptr = tipc_bclink_retransmit_to(net); | |
920 | ||
921 | tipc_addr_string_fill(addr_string, n_ptr->addr); | |
922 | pr_info("Broadcast link info for %s\n", addr_string); | |
923 | pr_info("Reception permitted: %d, Acked: %u\n", | |
924 | n_ptr->bclink.recv_permitted, | |
925 | n_ptr->bclink.acked); | |
926 | pr_info("Last in: %u, Oos state: %u, Last sent: %u\n", | |
927 | n_ptr->bclink.last_in, | |
928 | n_ptr->bclink.oos_state, | |
929 | n_ptr->bclink.last_sent); | |
930 | ||
931 | n_ptr->action_flags |= TIPC_BCAST_RESET; | |
932 | l_ptr->stale_count = 0; | |
933 | } | |
934 | } | |
935 | ||
936 | void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb, | |
937 | u32 retransmits) | |
938 | { | |
939 | struct tipc_msg *msg; | |
940 | ||
941 | if (!skb) | |
942 | return; | |
943 | ||
944 | msg = buf_msg(skb); | |
945 | ||
946 | /* Detect repeated retransmit failures */ | |
947 | if (l_ptr->last_retransm == msg_seqno(msg)) { | |
948 | if (++l_ptr->stale_count > 100) { | |
949 | link_retransmit_failure(l_ptr, skb); | |
950 | return; | |
951 | } | |
952 | } else { | |
953 | l_ptr->last_retransm = msg_seqno(msg); | |
954 | l_ptr->stale_count = 1; | |
955 | } | |
956 | ||
957 | skb_queue_walk_from(&l_ptr->transmq, skb) { | |
958 | if (!retransmits) | |
959 | break; | |
960 | msg = buf_msg(skb); | |
961 | msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1)); | |
962 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | |
963 | tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb, | |
964 | l_ptr->media_addr); | |
965 | retransmits--; | |
966 | l_ptr->stats.retransmitted++; | |
967 | } | |
968 | } | |
969 | ||
/* tipc_link_retrans - queue header-adjusted copies of the packets in
 * sequence range [from, to] from the transmit queue onto @xmitq
 *
 * Returns LINK_FAILURE_EVT handling result if the head packet has been
 * retransmitted more than 100 times in a row, otherwise 0
 */
int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
		      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->owner->bclink.last_in;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		hdr = buf_msg(skb);
		/* Copy with INT_H_SIZE headroom; acks are set in the copy
		 * only, so the queued original stays untouched
		 */
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}
1012 | ||
1013 | /* tipc_data_input - deliver data and name distr msgs to upper layer | |
1014 | * | |
1015 | * Consumes buffer if message is of right type | |
1016 | * Node lock must be held | |
1017 | */ | |
1018 | static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb, | |
1019 | struct sk_buff_head *inputq) | |
1020 | { | |
1021 | struct tipc_node *node = link->owner; | |
1022 | ||
1023 | switch (msg_user(buf_msg(skb))) { | |
1024 | case TIPC_LOW_IMPORTANCE: | |
1025 | case TIPC_MEDIUM_IMPORTANCE: | |
1026 | case TIPC_HIGH_IMPORTANCE: | |
1027 | case TIPC_CRITICAL_IMPORTANCE: | |
1028 | case CONN_MANAGER: | |
1029 | skb_queue_tail(inputq, skb); | |
1030 | return true; | |
1031 | case NAME_DISTRIBUTOR: | |
1032 | node->bclink.recv_permitted = true; | |
1033 | skb_queue_tail(link->namedq, skb); | |
1034 | return true; | |
1035 | case MSG_BUNDLER: | |
1036 | case TUNNEL_PROTOCOL: | |
1037 | case MSG_FRAGMENTER: | |
1038 | case BCAST_PROTOCOL: | |
1039 | return false; | |
1040 | default: | |
1041 | pr_warn("Dropping received illegal msg type\n"); | |
1042 | kfree_skb(skb); | |
1043 | return false; | |
1044 | }; | |
1045 | } | |
1046 | ||
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_node *node = l->owner;
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	/* Tunnelled packet (failover/synch): unwrap the inner packet */
	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		/* Discard inner packets already delivered before failover */
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		/* Continue reassembly of the failed link's fragment chain */
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		/* Unbundle and deliver each inner message */
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			/* Last fragment received; deliver whole message */
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb) {
			/* Reassembly failed irrecoverably: fail the link */
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_link_sync_rcv(node, skb);
		return 0;
	}
drop:
	kfree_skb(skb);
	return 0;
}
1107 | ||
1108 | static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked) | |
1109 | { | |
1110 | bool released = false; | |
1111 | struct sk_buff *skb, *tmp; | |
1112 | ||
1113 | skb_queue_walk_safe(&l->transmq, skb, tmp) { | |
1114 | if (more(buf_seqno(skb), acked)) | |
1115 | break; | |
1116 | __skb_unlink(skb, &l->transmq); | |
1117 | kfree_skb(skb); | |
1118 | released = true; | |
1119 | } | |
1120 | return released; | |
1121 | } | |
1122 | ||
1123 | /* tipc_link_build_ack_msg: prepare link acknowledge message for transmission | |
1124 | */ | |
1125 | void tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq) | |
1126 | { | |
1127 | l->rcv_unacked = 0; | |
1128 | l->stats.sent_acks++; | |
1129 | tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq); | |
1130 | } | |
1131 | ||
1132 | /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message | |
1133 | */ | |
1134 | void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq) | |
1135 | { | |
1136 | int mtyp = RESET_MSG; | |
1137 | ||
1138 | if (l->state == LINK_ESTABLISHING) | |
1139 | mtyp = ACTIVATE_MSG; | |
1140 | ||
1141 | tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq); | |
1142 | } | |
1143 | ||
1144 | /* tipc_link_build_nack_msg: prepare link nack message for transmission | |
1145 | */ | |
1146 | static void tipc_link_build_nack_msg(struct tipc_link *l, | |
1147 | struct sk_buff_head *xmitq) | |
1148 | { | |
1149 | u32 def_cnt = ++l->stats.deferred_recv; | |
1150 | ||
1151 | if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV)) | |
1152 | tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq); | |
1153 | } | |
1154 | ||
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 *
 * Consumes the buffer. Keeps looping as long as the next in-sequence
 * packet is found in the deferred queue.
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc = tipc_link_input(l, skb, l->inputq);
		if (unlikely(rc))
			break;
		/* Acknowledge periodically to keep the peer's window open */
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			tipc_link_build_ack_msg(l, xmitq);

	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}
1224 | ||
1225 | /** | |
1226 | * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue | |
1227 | * | |
1228 | * Returns increase in queue length (i.e. 0 or 1) | |
1229 | */ | |
1230 | u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb) | |
1231 | { | |
1232 | struct sk_buff *skb1; | |
1233 | u16 seq_no = buf_seqno(skb); | |
1234 | ||
1235 | /* Empty queue ? */ | |
1236 | if (skb_queue_empty(list)) { | |
1237 | __skb_queue_tail(list, skb); | |
1238 | return 1; | |
1239 | } | |
1240 | ||
1241 | /* Last ? */ | |
1242 | if (less(buf_seqno(skb_peek_tail(list)), seq_no)) { | |
1243 | __skb_queue_tail(list, skb); | |
1244 | return 1; | |
1245 | } | |
1246 | ||
1247 | /* Locate insertion point in queue, then insert; discard if duplicate */ | |
1248 | skb_queue_walk(list, skb1) { | |
1249 | u16 curr_seqno = buf_seqno(skb1); | |
1250 | ||
1251 | if (seq_no == curr_seqno) { | |
1252 | kfree_skb(skb); | |
1253 | return 0; | |
1254 | } | |
1255 | ||
1256 | if (less(seq_no, curr_seqno)) | |
1257 | break; | |
1258 | } | |
1259 | ||
1260 | __skb_queue_before(list, skb1, skb); | |
1261 | return 1; | |
1262 | } | |
1263 | ||
1264 | /* | |
1265 | * Send protocol message to the other endpoint. | |
1266 | */ | |
1267 | void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg, | |
1268 | u32 gap, u32 tolerance, u32 priority) | |
1269 | { | |
1270 | struct sk_buff *skb = NULL; | |
1271 | struct sk_buff_head xmitq; | |
1272 | ||
1273 | __skb_queue_head_init(&xmitq); | |
1274 | tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap, | |
1275 | tolerance, priority, &xmitq); | |
1276 | skb = __skb_dequeue(&xmitq); | |
1277 | if (!skb) | |
1278 | return; | |
1279 | tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr); | |
1280 | l->rcv_unacked = 0; | |
1281 | kfree_skb(skb); | |
1282 | } | |
1283 | ||
/* tipc_link_build_proto_msg: prepare link protocol message for transmission
 *
 * Fills in the link's preformatted protocol header (l->pmsg), copies it
 * into a fresh buffer and appends that buffer to @xmitq
 */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	u16 snd_nxt = l->snd_nxt;
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcv_last = rcv_nxt - 1;
	int node_up = l->owner->bclink.recv_permitted;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;
		msg_set_next_sent(hdr, snd_nxt);

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_ack(hdr, rcv_last);
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_next_sent(hdr, 1);
	}
	/* Copy the prepared header into a private buffer for the queue */
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}
1343 | ||
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	/* The dummy is only sent through this link's own queues to keep
	 * sequence numbering consistent; its transmit copy is discarded
	 */
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet; the loop runs twice,
	 * first over transmq, then (via the goto below) over backlogq
	 */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		/* Backlog packets have no sequence number yet; assign one */
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	/* On failover, hand over reassembly state to the tunnel link */
	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
1410 | ||
/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 *
 * Consumes the buffer; returns a link FSM event code or 0
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt =  msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	int mtyp = msg_type(hdr);
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l))
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (mtyp) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name =  strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr),	TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		/* MTU is negotiated down to the smaller endpoint value */
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}
1517 | ||
1518 | void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) | |
1519 | { | |
1520 | int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE); | |
1521 | ||
1522 | l->window = win; | |
1523 | l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; | |
1524 | l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win; | |
1525 | l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3; | |
1526 | l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2; | |
1527 | l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; | |
1528 | } | |
1529 | ||
1530 | /* tipc_link_find_owner - locate owner node of link by link's name | |
1531 | * @net: the applicable net namespace | |
1532 | * @name: pointer to link name string | |
1533 | * @bearer_id: pointer to index in 'node->links' array where the link was found. | |
1534 | * | |
1535 | * Returns pointer to node owning the link, or 0 if no matching link is found. | |
1536 | */ | |
1537 | static struct tipc_node *tipc_link_find_owner(struct net *net, | |
1538 | const char *link_name, | |
1539 | unsigned int *bearer_id) | |
1540 | { | |
1541 | struct tipc_net *tn = net_generic(net, tipc_net_id); | |
1542 | struct tipc_link *l_ptr; | |
1543 | struct tipc_node *n_ptr; | |
1544 | struct tipc_node *found_node = NULL; | |
1545 | int i; | |
1546 | ||
1547 | *bearer_id = 0; | |
1548 | rcu_read_lock(); | |
1549 | list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { | |
1550 | tipc_node_lock(n_ptr); | |
1551 | for (i = 0; i < MAX_BEARERS; i++) { | |
1552 | l_ptr = n_ptr->links[i].link; | |
1553 | if (l_ptr && !strcmp(l_ptr->name, link_name)) { | |
1554 | *bearer_id = i; | |
1555 | found_node = n_ptr; | |
1556 | break; | |
1557 | } | |
1558 | } | |
1559 | tipc_node_unlock(n_ptr); | |
1560 | if (found_node) | |
1561 | break; | |
1562 | } | |
1563 | rcu_read_unlock(); | |
1564 | ||
1565 | return found_node; | |
1566 | } | |
1567 | ||
1568 | /** | |
1569 | * link_reset_statistics - reset link statistics | |
1570 | * @l_ptr: pointer to link | |
1571 | */ | |
1572 | static void link_reset_statistics(struct tipc_link *l_ptr) | |
1573 | { | |
1574 | memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); | |
1575 | l_ptr->stats.sent_info = l_ptr->snd_nxt; | |
1576 | l_ptr->stats.recv_info = l_ptr->rcv_nxt; | |
1577 | } | |
1578 | ||
1579 | static void link_print(struct tipc_link *l, const char *str) | |
1580 | { | |
1581 | struct sk_buff *hskb = skb_peek(&l->transmq); | |
1582 | u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1; | |
1583 | u16 tail = l->snd_nxt - 1; | |
1584 | ||
1585 | pr_info("%s Link <%s> state %x\n", str, l->name, l->state); | |
1586 | pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n", | |
1587 | skb_queue_len(&l->transmq), head, tail, | |
1588 | skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt); | |
1589 | } | |
1590 | ||
1591 | /* Parse and validate nested (link) properties valid for media, bearer and link | |
1592 | */ | |
1593 | int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]) | |
1594 | { | |
1595 | int err; | |
1596 | ||
1597 | err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop, | |
1598 | tipc_nl_prop_policy); | |
1599 | if (err) | |
1600 | return err; | |
1601 | ||
1602 | if (props[TIPC_NLA_PROP_PRIO]) { | |
1603 | u32 prio; | |
1604 | ||
1605 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); | |
1606 | if (prio > TIPC_MAX_LINK_PRI) | |
1607 | return -EINVAL; | |
1608 | } | |
1609 | ||
1610 | if (props[TIPC_NLA_PROP_TOL]) { | |
1611 | u32 tol; | |
1612 | ||
1613 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | |
1614 | if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL)) | |
1615 | return -EINVAL; | |
1616 | } | |
1617 | ||
1618 | if (props[TIPC_NLA_PROP_WIN]) { | |
1619 | u32 win; | |
1620 | ||
1621 | win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); | |
1622 | if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN)) | |
1623 | return -EINVAL; | |
1624 | } | |
1625 | ||
1626 | return 0; | |
1627 | } | |
1628 | ||
/* tipc_nl_link_set - netlink handler: update link properties (tolerance,
 * priority, window) identified by link name
 *
 * Returns 0 on success or a negative errno
 */
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* The broadcast link is handled by its own setter */
	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	/* Hold the node lock while the link is read and modified */
	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		/* Tolerance and priority changes are announced to the peer
		 * via an immediate STATE message
		 */
		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link->tolerance = tol;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
1706 | ||
/* __tipc_nl_add_stats - append a nested TIPC_NLA_LINK_STATS attribute
 * with all link counters to @skb
 *
 * Returns 0 on success or -EMSGSIZE if the message buffer is full
 */
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	/* Local key/value pair used to drive the emission loop below */
	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	/* Roll back the partially-built nest on buffer exhaustion */
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
1771 | ||
1772 | /* Caller should hold appropriate locks to protect the link */ | |
1773 | static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | |
1774 | struct tipc_link *link, int nlflags) | |
1775 | { | |
1776 | int err; | |
1777 | void *hdr; | |
1778 | struct nlattr *attrs; | |
1779 | struct nlattr *prop; | |
1780 | struct tipc_net *tn = net_generic(net, tipc_net_id); | |
1781 | ||
1782 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | |
1783 | nlflags, TIPC_NL_LINK_GET); | |
1784 | if (!hdr) | |
1785 | return -EMSGSIZE; | |
1786 | ||
1787 | attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); | |
1788 | if (!attrs) | |
1789 | goto msg_full; | |
1790 | ||
1791 | if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) | |
1792 | goto attr_msg_full; | |
1793 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, | |
1794 | tipc_cluster_mask(tn->own_addr))) | |
1795 | goto attr_msg_full; | |
1796 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) | |
1797 | goto attr_msg_full; | |
1798 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt)) | |
1799 | goto attr_msg_full; | |
1800 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt)) | |
1801 | goto attr_msg_full; | |
1802 | ||
1803 | if (tipc_link_is_up(link)) | |
1804 | if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) | |
1805 | goto attr_msg_full; | |
1806 | if (tipc_link_is_active(link)) | |
1807 | if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE)) | |
1808 | goto attr_msg_full; | |
1809 | ||
1810 | prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); | |
1811 | if (!prop) | |
1812 | goto attr_msg_full; | |
1813 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) | |
1814 | goto prop_msg_full; | |
1815 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) | |
1816 | goto prop_msg_full; | |
1817 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, | |
1818 | link->window)) | |
1819 | goto prop_msg_full; | |
1820 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) | |
1821 | goto prop_msg_full; | |
1822 | nla_nest_end(msg->skb, prop); | |
1823 | ||
1824 | err = __tipc_nl_add_stats(msg->skb, &link->stats); | |
1825 | if (err) | |
1826 | goto attr_msg_full; | |
1827 | ||
1828 | nla_nest_end(msg->skb, attrs); | |
1829 | genlmsg_end(msg->skb, hdr); | |
1830 | ||
1831 | return 0; | |
1832 | ||
1833 | prop_msg_full: | |
1834 | nla_nest_cancel(msg->skb, prop); | |
1835 | attr_msg_full: | |
1836 | nla_nest_cancel(msg->skb, attrs); | |
1837 | msg_full: | |
1838 | genlmsg_cancel(msg->skb, hdr); | |
1839 | ||
1840 | return -EMSGSIZE; | |
1841 | } | |
1842 | ||
1843 | /* Caller should hold node lock */ | |
1844 | static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, | |
1845 | struct tipc_node *node, u32 *prev_link) | |
1846 | { | |
1847 | u32 i; | |
1848 | int err; | |
1849 | ||
1850 | for (i = *prev_link; i < MAX_BEARERS; i++) { | |
1851 | *prev_link = i; | |
1852 | ||
1853 | if (!node->links[i].link) | |
1854 | continue; | |
1855 | ||
1856 | err = __tipc_nl_add_link(net, msg, | |
1857 | node->links[i].link, NLM_F_MULTI); | |
1858 | if (err) | |
1859 | return err; | |
1860 | } | |
1861 | *prev_link = 0; | |
1862 | ||
1863 | return 0; | |
1864 | } | |
1865 | ||
/* tipc_nl_link_dump - netlink dump callback listing every link in the net
 *
 * Resumable across callback invocations via cb->args:
 *   args[0] = address of the last node fully/partially dumped
 *   args[1] = bearer index to resume from within that node
 *   args[2] = completion flag (dump already finished)
 *
 * The first pass also emits the broadcast link before walking the node
 * list. Iteration is under rcu_read_lock(); each node is additionally
 * locked while its links are serialized.
 */
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];	/* resume point: node address */
	u32 prev_link = cb->args[1];	/* resume point: bearer index */
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		/* Resume: re-locate the node we stopped at last time */
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		/* Drop the lookup reference immediately; RCU keeps the list
		 * entry usable for the _continue iteration below.
		 */
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;	/* skb full; resume later */

			prev_node = node->addr;
		}
	} else {
		/* Fresh dump: broadcast link first, then all nodes */
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;	/* skb full; resume later */

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	/* Persist resume state for the next callback invocation */
	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}
1936 | ||
1937 | int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) | |
1938 | { | |
1939 | struct net *net = genl_info_net(info); | |
1940 | struct tipc_nl_msg msg; | |
1941 | char *name; | |
1942 | int err; | |
1943 | ||
1944 | msg.portid = info->snd_portid; | |
1945 | msg.seq = info->snd_seq; | |
1946 | ||
1947 | if (!info->attrs[TIPC_NLA_LINK_NAME]) | |
1948 | return -EINVAL; | |
1949 | name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); | |
1950 | ||
1951 | msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | |
1952 | if (!msg.skb) | |
1953 | return -ENOMEM; | |
1954 | ||
1955 | if (strcmp(name, tipc_bclink_name) == 0) { | |
1956 | err = tipc_nl_add_bc_link(net, &msg); | |
1957 | if (err) { | |
1958 | nlmsg_free(msg.skb); | |
1959 | return err; | |
1960 | } | |
1961 | } else { | |
1962 | int bearer_id; | |
1963 | struct tipc_node *node; | |
1964 | struct tipc_link *link; | |
1965 | ||
1966 | node = tipc_link_find_owner(net, name, &bearer_id); | |
1967 | if (!node) | |
1968 | return -EINVAL; | |
1969 | ||
1970 | tipc_node_lock(node); | |
1971 | link = node->links[bearer_id].link; | |
1972 | if (!link) { | |
1973 | tipc_node_unlock(node); | |
1974 | nlmsg_free(msg.skb); | |
1975 | return -EINVAL; | |
1976 | } | |
1977 | ||
1978 | err = __tipc_nl_add_link(net, &msg, link, 0); | |
1979 | tipc_node_unlock(node); | |
1980 | if (err) { | |
1981 | nlmsg_free(msg.skb); | |
1982 | return err; | |
1983 | } | |
1984 | } | |
1985 | ||
1986 | return genlmsg_reply(msg.skb, info); | |
1987 | } | |
1988 | ||
1989 | int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info) | |
1990 | { | |
1991 | int err; | |
1992 | char *link_name; | |
1993 | unsigned int bearer_id; | |
1994 | struct tipc_link *link; | |
1995 | struct tipc_node *node; | |
1996 | struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; | |
1997 | struct net *net = sock_net(skb->sk); | |
1998 | ||
1999 | if (!info->attrs[TIPC_NLA_LINK]) | |
2000 | return -EINVAL; | |
2001 | ||
2002 | err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, | |
2003 | info->attrs[TIPC_NLA_LINK], | |
2004 | tipc_nl_link_policy); | |
2005 | if (err) | |
2006 | return err; | |
2007 | ||
2008 | if (!attrs[TIPC_NLA_LINK_NAME]) | |
2009 | return -EINVAL; | |
2010 | ||
2011 | link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]); | |
2012 | ||
2013 | if (strcmp(link_name, tipc_bclink_name) == 0) { | |
2014 | err = tipc_bclink_reset_stats(net); | |
2015 | if (err) | |
2016 | return err; | |
2017 | return 0; | |
2018 | } | |
2019 | ||
2020 | node = tipc_link_find_owner(net, link_name, &bearer_id); | |
2021 | if (!node) | |
2022 | return -EINVAL; | |
2023 | ||
2024 | tipc_node_lock(node); | |
2025 | ||
2026 | link = node->links[bearer_id].link; | |
2027 | if (!link) { | |
2028 | tipc_node_unlock(node); | |
2029 | return -EINVAL; | |
2030 | } | |
2031 | ||
2032 | link_reset_statistics(link); | |
2033 | ||
2034 | tipc_node_unlock(node); | |
2035 | ||
2036 | return 0; | |
2037 | } |