/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "msg.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @last_retransmitted: sequence number of most recently retransmitted message
 * @stale_cnt: counter for number of identical retransmit attempts
 * @stale_limit: time when repeated identical retransmits must force link reset
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue of saved OOS b'cast messages received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u16 peer_session;
	u16 session;
	u16 snd_nxt_state;
	u16 rcv_nxt_state;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool in_session;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
	} backlog[5];
	u16 snd_nxt;
	u16 last_retransm;
	u16 window;
	u16 stale_cnt;
	unsigned long stale_limit;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	unsigned long prev_retr;
	u16 prev_from;
	u16 prev_to;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIMIT 10   /* [ms] */

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED     = 0xe,
	LINK_ESTABLISHING    = 0xe  << 4,
	LINK_RESET           = 0x1  << 8,
	LINK_RESETTING       = 0x2  << 12,
	LINK_PEER_RESET      = 0xd  << 16,
	LINK_FAILINGOVER     = 0xf  << 20,
	LINK_SYNCHING        = 0xc  << 24
};

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

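/* Example (illustrative, not from the original file): each FSM state above
 * occupies its own nibble position, so one bitwise AND can test membership
 * in several states at once, which is what link_is_up() relies on:
 *
 *	if (l->state & (LINK_RESET | LINK_FAILINGOVER))
 *		...;	// matches either state with a single mask
 */
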
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
	l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}

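/* Illustrative note: sequence numbers are u16 and wrap modulo 64k, so the
 * subtractions above are wrap-safe. E.g. with rcv_nxt = 65530 and
 * snd_nxt = 5, (u16)(5 - 65530) = 11, which more()/less() interpret as
 * "snd_nxt is 11 packets ahead of rcv_nxt".
 */
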
void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,C...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->in_session = false;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

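/* Illustrative sketch (assumed caller, not a call site in this file): the
 * node layer would create a unicast link roughly like this, using the
 * TIPC_DEF_* defaults for the numeric parameters:
 *
 *	struct tipc_link *l = NULL;
 *	struct sk_buff_head inputq, namedq;
 *
 *	skb_queue_head_init(&inputq);
 *	skb_queue_head_init(&namedq);
 *	if (!tipc_link_create(net, "eth0", bearer_id, TIPC_DEF_LINK_TOL,
 *			      'A', mtu, TIPC_DEF_LINK_PRI, TIPC_DEF_LINK_WIN,
 *			      session, self, peer, peer_id, peer_caps,
 *			      bc_sndlink, bc_rcvlink, &inputq, &namedq, &l))
 *		return false;
 */
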
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used initially if no peers
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_disable_rcast(net);

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

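/* Illustrative note: the chain above forms a 7-bucket histogram with
 * boundaries 64/256/1K/4K/16K/32K; e.g. a 300-byte message is counted in
 * msg_length_profile[2], and a fragmented message is counted once using
 * the size of the message wrapped in its first fragment.
 */
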
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff *skb, *tmp;
	int imp, i = 0;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (l->backlog[imp].len < l->backlog[imp].limit) {
			skb_unlink(skb, &l->wakeupq);
			skb_queue_tail(l->inputq, skb);
		} else if (i++ > 10) {
			break;
		}
	}
}

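/* Illustrative flow, inferred from the two helpers above: a sender that
 * fills its backlog level gets -ELINKCONG plus a SOCK_WAKEUP pseudo
 * message parked on l->wakeupq; once acks shrink the backlog below its
 * limit, link_prepare_wakeup() moves that pseudo message to l->inputq,
 * where the socket layer wakes the blocked sender.
 */
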
void tipc_link_reset(struct tipc_link *l)
{
	l->in_session = false;
	l->session++;
	l->mtu = l->advertised_mtu;
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->snd_nxt_state = 1;
	l->rcv_nxt_state = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->stale_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	int imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;
	int pkt_cnt = skb_queue_len(list);
	int rc = 0;

	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}

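/* Illustrative sketch (assumed caller, e.g. the node layer): a send path
 * builds a packet chain, hands it to tipc_link_xmit() and then pushes the
 * returned xmitq to the bearer:
 *
 *	struct sk_buff_head list, xmitq;
 *
 *	__skb_queue_head_init(&list);
 *	__skb_queue_head_init(&xmitq);
 *	... build packets into "list", e.g. with tipc_msg_build() ...
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	if (likely(rc == 0))
 *		tipc_bearer_xmit(net, bearer_id, &xmitq, addr);
 */
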
static void tipc_link_advance_backlog(struct tipc_link *l,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

/* tipc_link_retrans() - retransmit one or more packets
 * @l: the link to transmit on
 * @r: the receiving link ordering the retransmit. Same as l if unicast
 * @from: retransmit from (inclusive) this sequence number
 * @to: retransmit to (inclusive) this sequence number
 * @xmitq: queue for accumulating the retransmitted packets
 */
static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
			     u16 from, u16 to, struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	struct tipc_msg *hdr;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (r->last_retransm != buf_seqno(skb)) {
		r->last_retransm = buf_seqno(skb);
		r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
	} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
		link_retransmit_failure(l, skb);
		if (link_is_bc_sndlink(l))
			return TIPC_LINK_DOWN_EVT;
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	skb_queue_walk(&l->transmq, skb) {
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), from))
			continue;
		if (more(msg_seqno(hdr), to))
			break;
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}

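/* Illustrative note: the stale-retransmit guard above only escalates to a
 * link failure when the same head-of-queue packet has been asked for ~100
 * times AND the tolerance interval has expired, so a short burst of NACKs
 * for one packet does not reset an otherwise healthy link.
 */
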
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
			skb_queue_tail(mc_inputq, skb);
			return true;
		}
		/* else: fall through */
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case GROUP_PROTOCOL:
		skb_queue_tail(mc_inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * the risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;

		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
		l->snd_nxt = l->rcv_nxt;
		return TIPC_LINK_SND_STATE;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;
	int match1, match2;

	if (link_is_bc_rcvlink(l)) {
		match1 = def_cnt & 0xf;
		match2 = tipc_own_addr(l->net) & 0xf;
		if (match1 == match2)
			return TIPC_LINK_SND_STATE;
		return 0;
	}

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}

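/* Illustrative example: the "& 0xf" comparison above staggers broadcast
 * NACKs across receivers. Only the node whose own address matches the
 * deferral count in its low four bits answers at that count value, so at
 * most one in sixteen receivers NACKs at once instead of all of them.
 */
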
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			l->stale_cnt = 0;
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			rc |= tipc_link_build_nack_msg(l, xmitq);
			return rc;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_pkts++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct tipc_link *bcl = l->bc_rcvlink;
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff_head *dfq = &l->deferdq;
	bool node_up = link_is_up(bcl);
	struct tipc_mon_state *mstate = &l->mon_state;
	int dlen = 0;
	void *data;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if (!skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size, l->addr,
			      tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_ack_invalid(hdr, !node_up);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
			msg_set_seqno(hdr, l->snd_nxt_state++);
		msg_set_seq_gap(hdr, rcvgap);
		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
		msg_set_probe(hdr, probe);
		msg_set_is_keepalive(hdr, probe || probe_reply);
		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + dlen);
		skb_trim(skb, INT_H_SIZE + dlen);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}

/* tipc_link_validate_msg(): validate message against current link state
 * Returns true if message should be accepted, otherwise false
 */
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
	u16 curr_session = l->peer_session;
	u16 session = msg_session(hdr);
	int mtyp = msg_type(hdr);

	if (msg_user(hdr) != LINK_PROTOCOL)
		return true;

	switch (mtyp) {
	case RESET_MSG:
		if (!l->in_session)
			return true;
		/* Accept only RESET with new session number */
		return more(session, curr_session);
	case ACTIVATE_MSG:
		if (!l->in_session)
			return true;
		/* Accept only ACTIVATE with new or current session number */
		return !less(session, curr_session);
	case STATE_MSG:
		/* Accept only STATE with current session number */
		if (!l->in_session)
			return false;
		if (session != curr_session)
			return false;
		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
			return true;
		/* Accept only STATE with new sequence number */
		return !less(msg_seqno(hdr), l->rcv_nxt_state);
	default:
		return false;
	}
}

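/* Illustrative examples of the session rules above, assuming
 * curr_session = 5 and in_session = true:
 *
 *	RESET    with session 6 -> accepted (new session)
 *	RESET    with session 5 -> rejected (stale or duplicate reset)
 *	ACTIVATE with session 5 -> accepted (current session)
 *	STATE    with session 4 -> rejected (old session)
 */
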
/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 dlen = msg_data_sz(hdr);
	int mtyp = msg_type(hdr);
	bool reply = msg_probe(hdr);
	void *data;
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	skb_linearize(skb);
	hdr = buf_msg(skb);
	data = msg_data(hdr);

	if (!tipc_link_validate_msg(l, hdr))
		goto exit;

	switch (mtyp) {
	case RESET_MSG:
	case ACTIVATE_MSG:
		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (msg_peer_stopping(hdr))
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		else if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->in_session = true;
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:
		l->rcv_nxt_state = msg_seqno(hdr) + 1;

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own prio if peer indicates a different value */
		if ((peers_prio != l->priority) &&
		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}
		tipc_mon_rcv(l->net, data, dlen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || reply)
			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
						  rcvgap, 0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
 * - Adjust permitted range if there is overlap with previous retransmission
 */
static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
{
	unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);

	if (less(*to, *from))
		return false;

	/* New retransmission request */
	if ((elapsed > TIPC_BC_RETR_LIMIT) ||
	    less(*to, l->prev_from) || more(*from, l->prev_to)) {
		l->prev_from = *from;
		l->prev_to = *to;
		l->prev_retr = jiffies;
		return true;
	}

	/* Inside range of previous retransmit */
	if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
		return false;

	/* Fully or partially outside previous range => exclude overlap */
	if (less(*from, l->prev_from)) {
		*to = l->prev_from - 1;
		l->prev_from = *from;
	}
	if (more(*to, l->prev_to)) {
		*from = l->prev_to + 1;
		l->prev_to = *to;
	}
	l->prev_retr = jiffies;
	return true;
}

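/* Worked example (illustrative): with a previous retransmission of
 * [10, 20] still within TIPC_BC_RETR_LIMIT, a new request for [15, 25] is
 * trimmed to [21, 25] by the overlap exclusion above, while a request for
 * [12, 18] falls fully inside the previous range and is suppressed.
 */
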
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			  struct sk_buff_head *xmitq)
{
	struct tipc_link *snd_l = l->bc_sndlink;
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
	u16 from = msg_bcast_ack(hdr) + 1;
	u16 to = from + msg_bc_gap(hdr) - 1;
	int rc = 0;

	if (!link_is_up(l))
		return rc;

	if (!msg_peer_node_is_up(hdr))
		return rc;

	/* Open when peer acknowledges our bcast init msg (pkt #1) */
	if (msg_ack(hdr))
		l->bc_peer_is_up = true;

	if (!l->bc_peer_is_up)
		return rc;

	l->stats.recv_nacks++;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return rc;

	if (link_bc_retr_eval(snd_l, &from, &to))
		rc = tipc_link_retrans(snd_l, l, from, to, xmitq);

	l->snd_nxt = peers_snd_nxt;
	if (link_bc_rcv_gap(l))
		rc |= TIPC_LINK_SND_STATE;

	/* Return now if sender supports nack via STATE messages */
	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
		return rc;

	/* Otherwise, be backwards compatible */

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return 0;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return 0;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return 0;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
	return 0;
}

void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}

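/* Worked example (illustrative): with the default window of 50 packets the
 * limits above come out as 50/100/150/200 for LOW..CRITICAL importance,
 * i.e. the max_t() floors dominate; only windows above 50 raise them. The
 * SYSTEM limit instead scales with how many publication items fit in one
 * MTU-sized packet.
 */
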
/**
 * link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

*skb
,
2063 struct tipc_stats
*stats
)
2066 struct nlattr
*nest
;
2073 struct nla_map map
[] = {
2074 {TIPC_NLA_STATS_RX_INFO
, stats
->recv_pkts
},
2075 {TIPC_NLA_STATS_RX_FRAGMENTS
, stats
->recv_fragments
},
2076 {TIPC_NLA_STATS_RX_FRAGMENTED
, stats
->recv_fragmented
},
2077 {TIPC_NLA_STATS_RX_BUNDLES
, stats
->recv_bundles
},
2078 {TIPC_NLA_STATS_RX_BUNDLED
, stats
->recv_bundled
},
2079 {TIPC_NLA_STATS_TX_INFO
, stats
->sent_pkts
},
2080 {TIPC_NLA_STATS_TX_FRAGMENTS
, stats
->sent_fragments
},
2081 {TIPC_NLA_STATS_TX_FRAGMENTED
, stats
->sent_fragmented
},
2082 {TIPC_NLA_STATS_TX_BUNDLES
, stats
->sent_bundles
},
2083 {TIPC_NLA_STATS_TX_BUNDLED
, stats
->sent_bundled
},
2084 {TIPC_NLA_STATS_RX_NACKS
, stats
->recv_nacks
},
2085 {TIPC_NLA_STATS_RX_DEFERRED
, stats
->deferred_recv
},
2086 {TIPC_NLA_STATS_TX_NACKS
, stats
->sent_nacks
},
2087 {TIPC_NLA_STATS_TX_ACKS
, stats
->sent_acks
},
2088 {TIPC_NLA_STATS_RETRANSMITTED
, stats
->retransmitted
},
2089 {TIPC_NLA_STATS_DUPLICATES
, stats
->duplicates
},
2090 {TIPC_NLA_STATS_LINK_CONGS
, stats
->link_congs
},
2091 {TIPC_NLA_STATS_MAX_QUEUE
, stats
->max_queue_sz
},
2092 {TIPC_NLA_STATS_AVG_QUEUE
, stats
->queue_sz_counts
?
2093 (stats
->accu_queue_sz
/ stats
->queue_sz_counts
) : 0}
2096 nest
= nla_nest_start(skb
, TIPC_NLA_LINK_STATS
);
2100 for (i
= 0; i
< ARRAY_SIZE(map
); i
++)
2101 if (nla_put_u32(skb
, map
[i
].key
, map
[i
].val
))
2104 nla_nest_end(skb
, nest
);
2108 nla_nest_cancel(skb
, nest
);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}