/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_info;		/* used in counting # sent packets */
	u32 recv_info;		/* used in counting # recv'd packets */
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @net: pointer to namespace struct
 * @peer_session: link session # being used by peer end of link
 * @session: link session # being used by this end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @keepalive_intv: link keepalive timer interval
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @active: marks whether link is currently an active link
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @if_name: name of the local interface used by the link
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @rst_cnt: # of link establish attempts since link was last reset
 * @drop_point: seq # after which tunnelled failover packets are dropped
 * @failover_reasm_skb: reassembly buffer inherited from failed-over link
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @backlog: backlog queue lengths and congestion limits (indexed by importance)
 * @snd_nxt: next sequence number to use for outbound messages
 * @last_retransm: sequence number of most recently retransmitted message
 * @window: current send window
 * @stale_count: # of identical retransmit requests made by peer
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
 * @deferdq: deferred queue for OOS b'cast messages received from node
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @ackers: # of peers that needs to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @nack_state: send state for broadcast NACKs
 * @bc_peer_is_up: peer endpoint of broadcast link is assumed up
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u32 peer_session;
	u32 session;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	unsigned long keepalive_intv;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
	} backlog[5];
	u16 snd_nxt;
	u16 last_retransm;
	u16 window;
	u32 stale_count;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	int nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Wildcard value for link session numbers. When it is known that
 * peer endpoint is down, any session number must be accepted.
 */
#define ANY_SESSION 0x10000

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED    = 0xe,
	LINK_ESTABLISHING   = 0xe  << 4,
	LINK_RESET          = 0x1  << 8,
	LINK_RESETTING      = 0x2  << 12,
	LINK_PEER_RESET     = 0xd  << 16,
	LINK_FAILINGOVER    = 0xf  << 20,
	LINK_SYNCHING       = 0xc  << 24
};

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
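
/* Each FSM state value above occupies its own, non-overlapping bit range,
 * so a set of states can be tested with a single mask operation, as
 * link_is_up() does. Illustrative sketch, not part of the build:
 *
 *	if (l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING))
 *		pr_info("link is in one of the 'reset' states\n");
 */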

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,C...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->peer_session = ANY_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @mtu: mtu to be used
 * @window: send window to be used
 * @peer_caps: bitmap describing peer node capabilities
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			/* fall through */
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}
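
/* Illustrative sketch, not part of the build: a caller (e.g. the node
 * layer) drives the FSM by feeding events and reacting to the returned
 * flags; TIPC_LINK_DOWN_EVT is the only flag set by the FSM above.
 *
 *	int rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 *
 *	if (rc & TIPC_LINK_DOWN_EVT)
 *		;	// caller tears down usage of the link
 */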

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;

	link_profile_stats(l);

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		if (l->silent_intv_cnt > l->abort_limit)
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		mtyp = STATE_MSG;
		state = bc_acked != bc_snt;
		probe = l->silent_intv_cnt;
		l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);

	return rc;
}
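
/* Illustrative sketch, not part of the build: the node timer is expected
 * to call tipc_link_timeout() once per keepalive interval and then push
 * any generated protocol packets out on the bearer. Assuming the node
 * layer supplies the peer media address 'maddr':
 *
 *	struct sk_buff_head xmitq;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_timeout(l, &xmitq);
 *	tipc_bearer_xmit(l->net, l->bearer_id, &xmitq, maddr);
 */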

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: chain of buffers containing the message whose send was attempted
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = tipc_own_addr(link->net);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}
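
/* Illustrative sketch, not part of the build: a sender that receives
 * -ELINKCONG from tipc_link_xmit() still owns its buffer list and may
 * retry once the SOCK_WAKEUP pseudo message created above is delivered:
 *
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	if (rc == -ELINKCONG)
 *		;	// wait for SOCK_WAKEUP, then retry with 'list'
 */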

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}

void tipc_link_reset(struct tipc_link *l)
{
	l->peer_session = ANY_SESSION;
	l->session++;
	l->mtu = l->advertised_mtu;
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->stats.recv_info = 0;
	l->stale_count = 0;
	l->bc_peer_is_up = false;
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}
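
/* Illustrative sketch, not part of the build: the expected calling
 * pattern. The message is first built/fragmented into 'list' (e.g. with
 * tipc_msg_build()), then enqueued here; whatever lands in 'xmitq' is
 * handed to the bearer by the caller:
 *
 *	struct sk_buff_head list, xmitq;
 *
 *	__skb_queue_head_init(&list);
 *	__skb_queue_head_init(&xmitq);
 *	// ... fill 'list' with the packets of one message ...
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	if (!rc)
 *		;	// send 'xmitq' via the bearer
 */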

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "Resetting link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
		      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}
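
/* Note that less()/more() compare sequence numbers modulo 2^16, so the
 * [from, to] retransmit range above behaves correctly across wrap-around.
 * Worked example (values assumed for illustration), with from = 65534 and
 * to = 2:
 *
 *	less(65535, from) == false	// 65535 starts the retransmit walk
 *	more(1, to) == false		// 1 (post-wrap) is still in range
 *	more(3, to) == true		// 3 terminates the walk
 */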

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	}
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;
		return TIPC_LINK_SND_BC_ACK;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	return 0;
}
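
/* The broadcast-ack test above staggers acks across nodes: a node acks
 * only when the low four bits of rcv_nxt XORed with its own address give
 * 0xf, i.e. roughly once per 16 packets, at a per-node offset. Worked
 * example (addresses assumed for illustration):
 *
 *	own_addr ending in 0x1 acks at rcv_nxt = 0xe, 0x1e, 0x2e, ...
 *	own_addr ending in 0x2 acks at rcv_nxt = 0xd, 0x1d, 0x2d, ...
 */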

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 */
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;

	if (link_is_bc_rcvlink(l))
		return;

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}
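
/* Illustrative sketch, not part of the build: the receive-side pattern
 * mirrors the send side. The bearer layer feeds each arriving packet to
 * tipc_link_rcv(), then transmits whatever protocol traffic the call
 * queued on 'xmitq' while the socket layer drains 'inputq':
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_rcv(l, skb, &xmitq);
 *	if (rc & TIPC_LINK_UP_EVT)
 *		;	// node layer brings the link up
 *	// send 'xmitq' via the bearer
 */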

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff_head *dfq = &l->deferdq;
	bool node_up = link_is_up(l->bc_rcvlink);

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if (!skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      TIPC_MAX_IF_NAME, l->addr,
			      tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		msg_set_seq_gap(hdr, rcvgap);
		msg_set_size(hdr, INT_H_SIZE);
		msg_set_probe(hdr, probe);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(msg_data(hdr), l->if_name);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
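
/* Each tunnel packet built above is simply the original packet prefixed
 * with a fresh INT_H_SIZE tunnel header, so its on-wire layout is:
 *
 *	[ tunnel hdr (INT_H_SIZE) | original hdr + payload (pktlen) ]
 *
 * and its total size is pktlen + INT_H_SIZE, as set via msg_set_size().
 */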

/* tipc_link_proto_rcv(): receive link level protocol message.
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane.
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	int mtyp = msg_type(hdr);
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (mtyp) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != ANY_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:
		skb_linearize(skb);
		hdr = buf_msg(skb);

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (msg_peer_stopping(hdr))
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		else if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
					   TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			   struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (!link_is_up(l))
		return;

	if (!msg_peer_node_is_up(hdr))
		return;

	l->bc_peer_is_up = true;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return;

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
}

void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
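
/* Worked example (window value assumed for illustration): with win = 50,
 * the backlog congestion limits become
 *
 *	TIPC_LOW_IMPORTANCE:       25  (win / 2)
 *	TIPC_MEDIUM_IMPORTANCE:    50  (win)
 *	TIPC_HIGH_IMPORTANCE:      75  (win / 2 * 3)
 *	TIPC_CRITICAL_IMPORTANCE: 100  (win * 2)
 *
 * while TIPC_SYSTEM_IMPORTANCE is bounded only by max_bulk, i.e. by how
 * many name table items fit into one bulk distribution at the link mtu.
 */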

/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
	if (!link_is_bc_sndlink(l)) {
		l->stats.sent_info = l->snd_nxt;
		l->stats.recv_info = l->rcv_nxt;
	}
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}