/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "name_distr.h"
#include <linux/pkt_sched.h>
struct tipc_stats {
	/* ... */
	u32 link_congs;		/* # port sends blocked by congestion */
	/* ... */
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};
/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 * @session: session to be used by link
 * @snd_nxt_state: next send seq number
 * @rcv_nxt_state: next rcv seq number
 * @in_session: have received ACTIVATE_MSG from peer
 * @active: link is active
 * @if_name: associated interface name
 * @rst_cnt: link reset counter
 * @drop_point: seq number for failover handling (FIXME)
 * @failover_reasm_skb: saved failover msg ptr (FIXME)
 * @failover_deferdq: deferred message queue for failover processing (FIXME)
 * @transmq: the link's transmit queue
 * @backlog: link's backlog by priority (importance)
 * @snd_nxt: next sequence number to be used
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @deferdq: deferred receive queue
 * @window: sliding window size for congestion handling
 * @min_win: minimal send window to be used by link
 * @ssthresh: slow start threshold for congestion handling
 * @max_win: maximal send window to be used by link
 * @cong_acks: congestion acks for congestion avoidance (FIXME)
 * @checkpoint: seq number for congestion window size handling
 * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
 * @last_gap: last gap ack blocks for bcast (FIXME)
 * @last_ga: ptr to gap ack blocks
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @nack_state: bcast nack state
 * @bc_peer_is_up: peer has acked the bcast init msg
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	/* ... */
	char if_name[TIPC_MAX_IF_NAME];
	/* ... */
	struct tipc_mon_state mon_state;
	/* ... */

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;
	struct sk_buff_head failover_deferdq;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
		struct sk_buff *target_bskb;
	} backlog[5];
	/* ... */

	/* Reception */
	/* ... */
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;
	/* ... */

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;
	struct sk_buff *reasm_tnlmsg;

	/* Broadcast */
	/* ... */
	struct tipc_gap_ack_blks *last_ga;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	/* ... */

	/* Statistics */
	struct tipc_stats stats;
};
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
/* Link FSM states
 */
enum {
	LINK_ESTABLISHED	= 0xe,
	LINK_ESTABLISHING	= 0xe << 4,
	LINK_RESET		= 0x1 << 8,
	LINK_RESETTING		= 0x2 << 12,
	LINK_PEER_RESET		= 0xd << 16,
	LINK_FAILINGOVER	= 0xf << 20,
	LINK_SYNCHING		= 0xc << 24
};
/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
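/* Illustrative sketch (ours, not part of the original source): each FSM
 * state above occupies its own nibble of the state word, so several states
 * can be OR-ed into one mask and membership tested with a single AND,
 * exactly as link_is_up() does. Compiled out; buildable stand-alone.
 */
#if 0
#include <stdio.h>

enum {
	DEMO_ESTABLISHED  = 0xe,
	DEMO_ESTABLISHING = 0xe << 4,
	DEMO_RESET        = 0x1 << 8,
	DEMO_SYNCHING     = 0xc << 24
};

int main(void)
{
	int state = DEMO_SYNCHING;

	/* One mask test replaces a chain of equality comparisons */
	if (state & (DEMO_ESTABLISHED | DEMO_SYNCHING))
		printf("link is up (state 0x%x)\n", state);
	return 0;
}
#endif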
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc);
static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted);
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}
void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}
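/* Illustrative sketch (ours): the 32-bit link id returned above packs the
 * peer's bearer id into the high half-word and the local bearer id into
 * the low one; decoding is two shifts/masks. Compiled out.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t peer_bearer_id = 2, bearer_id = 1;
	uint32_t link_id = peer_bearer_id << 16 | bearer_id;

	printf("id=0x%08x peer=%u local=%u\n",
	       link_id, link_id >> 16, link_id & 0xffff);
	return 0;
}
#endif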
int tipc_link_min_win(struct tipc_link *l)
{
	return l->min_win;
}

int tipc_link_max_win(struct tipc_link *l)
{
	return l->max_win;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
	l->peer_caps = capabilities;
}
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}
static u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	else if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}
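/* Illustrative sketch (ours): TIPC sequence numbers are u16 and compared
 * modulo 2^16, so a gap computed as "snd_nxt - rcv_nxt" stays correct
 * across wraparound. more() is modelled here; the kernel versions live
 * in the TIPC message headers. Compiled out.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int more(uint16_t left, uint16_t right)
{
	return (int16_t)(left - right) > 0;
}

int main(void)
{
	uint16_t rcv_nxt = 65530, snd_nxt = 5;	/* snd_nxt has wrapped */

	if (more(snd_nxt, rcv_nxt))
		printf("gap = %u\n", (uint16_t)(snd_nxt - rcv_nxt)); /* 11 */
	return 0;
}
#endif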
void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

int tipc_link_mss(struct tipc_link *l)
{
#ifdef CONFIG_TIPC_CRYPTO
	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
#else
	return l->mtu - INT_H_SIZE;
#endif
}
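/* Illustrative sketch (ours): the usable payload per packet is the link
 * MTU minus the internal header, and additionally minus the encryption
 * overhead when CONFIG_TIPC_CRYPTO is enabled. The sizes below are
 * example values only, not the authoritative kernel constants.
 */
#if 0
#include <stdio.h>

#define DEMO_INT_H_SIZE		40		/* example header size */
#define DEMO_EMSG_OVERHEAD	(8 + 16 + 16)	/* example crypto overhead */

int main(void)
{
	int mtu = 1500;

	printf("plain mss:  %d\n", mtu - DEMO_INT_H_SIZE);
	printf("crypto mss: %d\n", mtu - DEMO_INT_H_SIZE - DEMO_EMSG_OVERHEAD);
	return 0;
}
#endif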
u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
	return l->state;
}
/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A, B, C, ...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @session: session to be used by link
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 * @self: local unicast link id
 * @peer_id: 128-bit ID of peer
 *
 * Return: true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      u32 min_win, u32 max_win, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->in_session = false;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	if (bc_rcvlink)
		bc_rcvlink->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, min_win, max_win);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	__skb_queue_head_init(&l->failover_deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @mtu: mtu to be used initially if no peers
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 *
 * Return: true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
			 int mtu, u32 min_win, u32 max_win, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
			      max_win, 0, ownnode, peer, NULL, peer_caps,
			      bc_sndlink, NULL, inputq, namedq, link))
		return false;

	l = *link;
	if (peer_id) {
		char peer_str[NODE_ID_STR_LEN] = {0,};

		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
		/* Broadcast receiver link name: "broadcast-link:<peer>" */
		snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
			 peer_str);
	} else {
		strcpy(l->name, tipc_bclink_name);
	}
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_toggle_rcast(net, false);

	return true;
}
/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;
	int old_state = l->state;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
}
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_inner_hdr(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}
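/* Illustrative sketch (ours): the if/else ladder above sorts message
 * lengths into seven histogram buckets. A compact table-driven
 * equivalent of the same mapping, compiled out:
 */
#if 0
#include <stdio.h>

static int length_bucket(int length)
{
	static const int lim[] = { 64, 256, 1024, 4096, 16384, 32768 };
	int i;

	for (i = 0; i < 6; i++)
		if (length <= lim[i])
			return i;
	return 6;
}

int main(void)
{
	printf("%d %d %d\n", length_bucket(60), length_bucket(1000),
	       length_bucket(40000));	/* prints: 0 2 6 */
	return 0;
}
#endif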
/**
 * tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Return: true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */
bool tipc_link_too_silent(struct tipc_link *l)
{
	return (l->silent_intv_cnt + 2 > l->abort_limit);
}
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		probe |= !skb_queue_empty(&l->deferdq);
		if (l->snd_nxt == l->checkpoint) {
			tipc_link_update_cwin(l, 0, 0);
			probe = true;
		}
		l->checkpoint = l->snd_nxt;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
	return -ELINKCONG;
}
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff_head *wakeupq = &l->wakeupq;
	struct sk_buff_head *inputq = l->inputq;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head tmpq;
	int avail[5] = {0,};
	int imp = 0;

	__skb_queue_head_init(&tmpq);

	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;

	skb_queue_walk_safe(wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (avail[imp] <= 0)
			continue;
		avail[imp]--;
		__skb_unlink(skb, wakeupq);
		__skb_queue_tail(&tmpq, skb);
	}

	spin_lock_bh(&inputq->lock);
	skb_queue_splice_tail(&tmpq, inputq);
	spin_unlock_bh(&inputq->lock);
}
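/* Illustrative sketch (ours): link_prepare_wakeup() wakes at most as many
 * waiting senders per importance level as there is free space in that
 * level's backlog. The budgeting, modelled with plain arrays and example
 * limits; compiled out:
 */
#if 0
#include <stdio.h>

#define LEVELS 5

int main(void)
{
	int limit[LEVELS] = { 50, 100, 150, 200, 300 };	/* example limits */
	int len[LEVELS]   = { 50,  97, 150, 180, 300 };	/* current lengths */
	int imp;

	for (imp = 0; imp < LEVELS; imp++) {
		int avail = limit[imp] - len[imp];

		printf("imp %d: wake up to %d senders\n",
		       imp, avail > 0 ? avail : 0);
	}
	return 0;
}
#endif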
/**
 * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
 *                                     the given skb should be next attempted
 * @skb: skb to set a future retransmission time for
 * @l: link the skb will be transmitted on
 */
static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
					      struct tipc_link *l)
{
	if (link_is_bc_sndlink(l))
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
	else
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
}
void tipc_link_reset(struct tipc_link *l)
{
	struct sk_buff_head list;
	u32 imp;

	__skb_queue_head_init(&list);

	l->in_session = false;
	/* Force re-synch of peer session number before establishing */
	l->peer_session--;
	l->session++;
	l->mtu = l->advertised_mtu;

	spin_lock_bh(&l->wakeupq.lock);
	skb_queue_splice_init(&l->wakeupq, &list);
	spin_unlock_bh(&l->wakeupq.lock);

	spin_lock_bh(&l->inputq->lock);
	skb_queue_splice_init(&list, l->inputq);
	spin_unlock_bh(&l->inputq->lock);

	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	__skb_queue_purge(&l->backlogq);
	__skb_queue_purge(&l->failover_deferdq);
	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
		l->backlog[imp].len = 0;
		l->backlog[imp].target_bskb = NULL;
	}
	kfree_skb(l->reasm_buf);
	kfree_skb(l->reasm_tnlmsg);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->reasm_tnlmsg = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->snd_nxt_state = 1;
	l->rcv_nxt_state = 1;
	l->acked = 0;
	l->last_gap = 0;
	kfree(l->last_ga);
	l->last_ga = NULL;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted.
 * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	int pkt_cnt = skb_queue_len(list);
	unsigned int mss = tipc_link_mss(l);
	unsigned int cwin = l->window;
	unsigned int mtu = l->mtu;
	struct tipc_msg *hdr;
	bool new_bundle;
	int rc = 0;
	int imp;

	if (pkt_cnt <= 0)
		return 0;

	hdr = buf_msg(skb_peek(list));
	if (unlikely(msg_size(hdr) > mtu)) {
		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
			skb_queue_len(list), msg_user(hdr),
			msg_type(hdr), msg_size(hdr), mtu);
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}

	imp = msg_importance(hdr);
	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while ((skb = __skb_dequeue(list))) {
		if (likely(skb_queue_len(transmq) < cwin)) {
			hdr = buf_msg(skb);
			msg_set_seqno(hdr, seqno);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				kfree_skb(skb);
				__skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_queue_tail(transmq, skb);
			tipc_link_set_skb_retransmit_time(skb, l);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
					mss, l->addr, &new_bundle)) {
			if (skb) {
				/* Keep a ref. to the skb for next try */
				l->backlog[imp].target_bskb = skb;
				l->backlog[imp].len++;
				__skb_queue_tail(backlogq, skb);
			} else {
				if (new_bundle) {
					l->stats.sent_bundles++;
					l->stats.sent_bundled++;
				}
				l->stats.sent_bundled++;
			}
			continue;
		}
		l->backlog[imp].target_bskb = NULL;
		l->backlog[imp].len += (1 + skb_queue_len(list));
		__skb_queue_tail(backlogq, skb);
		skb_queue_splice_tail_init(list, backlogq);
		break;
	}
	l->snd_nxt = seqno;
	return rc;
}
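/* Illustrative sketch (ours): the calling convention of tipc_link_xmit().
 * The caller owns both queues; the "list" chain is consumed and "xmitq" is
 * filled with clones that the caller then hands to the bearer. A -ELINKCONG
 * return means the message was still accepted, but the sending socket
 * should block until a SOCK_WAKEUP pseudo message arrives. Schematic only,
 * error handling trimmed; compiled out.
 */
#if 0
	struct sk_buff_head list, xmitq;
	int rc;

	__skb_queue_head_init(&list);
	__skb_queue_head_init(&xmitq);
	/* ... build the message chain into "list" ... */
	rc = tipc_link_xmit(l, &list, &xmitq);
	if (rc && rc != -ELINKCONG)
		return rc;		/* -EMSGSIZE or -ENOBUFS */
	/* ... hand "xmitq" to the bearer for actual transmission ... */
#endif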
static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted)
{
	int bklog_len = skb_queue_len(&l->backlogq);
	struct sk_buff_head *txq = &l->transmq;
	int txq_len = skb_queue_len(txq);
	u16 cwin = l->window;

	/* Enter fast recovery */
	if (unlikely(retransmitted)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = min_t(u16, l->ssthresh, l->window);
	}
	/* Enter slow start */
	if (unlikely(!released)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = l->min_win;
	}
	/* Don't increase window if no pressure on the transmit queue */
	if (txq_len + bklog_len < cwin)
		return;

	/* Don't increase window if there are holes in the transmit queue */
	if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
		return;

	l->cong_acks += released;

	/* Slow start */
	if (cwin <= l->ssthresh) {
		l->window = min_t(u16, cwin + released, l->max_win);
		return;
	}
	/* Congestion avoidance */
	if (l->cong_acks < cwin)
		return;
	l->window = min_t(u16, ++cwin, l->max_win);
	l->cong_acks = 0;
}
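/* Illustrative sketch (ours) of the window dynamics above: below ssthresh
 * the window grows by roughly the number of packets released per ack
 * (slow start); above it, by about one per window's worth of acks
 * (congestion avoidance). Standalone model with example numbers, not the
 * kernel logic verbatim; compiled out.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int window = 50, max_win = 8191;
	int ssthresh = 300, cong_acks = 0;
	int released = 25;	/* packets released by each ack */
	int i;

	for (i = 0; i < 40; i++) {
		if (window <= ssthresh) {	/* slow start: fast growth */
			window += released;
			if (window > max_win)
				window = max_win;
		} else {			/* congestion avoidance */
			cong_acks += released;
			if (cong_acks >= window) {
				window++;	/* ~1 per window of acks */
				cong_acks = 0;
			}
		}
	}
	printf("window after 40 acks: %d\n", window);
	return 0;
}
#endif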
static void tipc_link_advance_backlog(struct tipc_link *l,
				      struct sk_buff_head *xmitq)
{
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *txq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	struct tipc_msg *hdr;
	u16 cwin = l->window;
	u32 imp;

	while (skb_queue_len(txq) < cwin) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		imp = msg_importance(hdr);
		l->backlog[imp].len--;
		if (unlikely(skb == l->backlog[imp].target_bskb))
			l->backlog[imp].target_bskb = NULL;
		__skb_queue_tail(&l->transmq, skb);
		tipc_link_set_skb_retransmit_time(skb, l);

		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}
/**
 * link_retransmit_failure() - Detect repeated retransmit failures
 * @l: tipc link sender
 * @r: tipc link receiver (= l in case of unicast)
 * @rc: returned code
 *
 * Return: true if the repeated retransmit failure happens, otherwise
 * false
 */
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
				    int *rc)
{
	struct sk_buff *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return false;

	if (!TIPC_SKB_CB(skb)->retr_cnt)
		return false;

	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
			msecs_to_jiffies(r->tolerance * 10)))
		return false;

	hdr = buf_msg(skb);
	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
		return false;

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, dest: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
	pr_info("retr_stamp %d, retr_cnt %d\n",
		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
		TIPC_SKB_CB(skb)->retr_cnt);

	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");

	if (link_is_bc_sndlink(l)) {
		r->state = LINK_RESET;
		*rc |= TIPC_LINK_DOWN_EVT;
	} else {
		*rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}
	return true;
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
			skb_queue_tail(mc_inputq, skb);
			return true;
		}
		fallthrough;
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case GROUP_PROTOCOL:
		skb_queue_tail(mc_inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
#ifdef CONFIG_TIPC_CRYPTO
	case MSG_CRYPTO:
		tipc_crypto_msg_rcv(l->net, skb);
		return true;
#endif
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return true;
	}
}
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq,
			   struct sk_buff **reasm_skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int pos = 0;

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}

	kfree_skb(skb);
	return 0;
}
/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
 *			 inner message along with the ones in the old link's
 *			 deferdq
 * @l: tunnel link
 * @skb: TUNNEL_PROTOCOL message
 * @inputq: queue to put messages ready for delivery
 */
static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
			     struct sk_buff_head *inputq)
{
	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
	struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
	struct sk_buff_head *fdefq = &l->failover_deferdq;
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	int ipos = 0;
	int rc = 0;
	u16 seqno;

	if (msg_type(hdr) == SYNCH_MSG) {
		kfree_skb(skb);
		return 0;
	}

	/* Not a fragment? */
	if (likely(!msg_nof_fragms(hdr))) {
		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
			pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
					    skb_queue_len(fdefq));
			return 0;
		}
		kfree_skb(skb);
	} else {
		/* Set fragment type for buf_append */
		if (msg_fragm_no(hdr) == 1)
			msg_set_type(hdr, FIRST_FRAGMENT);
		else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
			msg_set_type(hdr, FRAGMENT);
		else
			msg_set_type(hdr, LAST_FRAGMENT);

		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
			/* Successful but non-complete reassembly? */
			if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
				return 0;
			pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		iskb = skb;
	}

	do {
		seqno = buf_seqno(iskb);
		if (unlikely(less(seqno, l->drop_point))) {
			kfree_skb(iskb);
			continue;
		}
		if (unlikely(seqno != l->drop_point)) {
			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
			continue;
		}

		l->drop_point++;
		if (!tipc_data_input(l, iskb, inputq))
			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
		if (unlikely(rc))
			break;
	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));

	return rc;
}
/**
 * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
 * @ga: returned pointer to the Gap ACK blocks if any
 * @l: the tipc link
 * @hdr: the PROTOCOL/STATE_MSG header
 * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
 *
 * Return: the total Gap ACK blocks size
 */
u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
			  struct tipc_msg *hdr, bool uc)
{
	struct tipc_gap_ack_blks *p;
	u16 sz = 0;

	/* Does peer support the Gap ACK blocks feature? */
	if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
		p = (struct tipc_gap_ack_blks *)msg_data(hdr);
		sz = ntohs(p->len);
		/* Sanity check */
		if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
			/* Good, check if the desired type exists */
			if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
				goto ok;
		/* Backward compatible: peer might not support bc, but uc? */
		} else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
			if (p->ugack_cnt) {
				p->bgack_cnt = 0;
				goto ok;
			}
		}
	}
	/* Other cases: ignore! */
	p = NULL;

ok:
	*ga = p;
	return sz;
}
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index)
{
	struct tipc_gap_ack *gacks = &ga->gacks[start_index];
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 expect, seqno = 0;
	u8 n = 0;

	if (!skb)
		return 0;

	expect = buf_seqno(skb);
	skb_queue_walk(&l->deferdq, skb) {
		seqno = buf_seqno(skb);
		if (unlikely(more(seqno, expect))) {
			gacks[n].ack = htons(expect - 1);
			gacks[n].gap = htons(seqno - expect);
			if (++n >= MAX_GAP_ACK_BLKS / 2) {
				pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
						    l->name, n,
						    skb_queue_len(&l->deferdq));
				return n;
			}
		} else if (unlikely(less(seqno, expect))) {
			pr_warn("Unexpected skb in deferdq!\n");
			continue;
		}
		expect = seqno + 1;
	}

	/* last block */
	gacks[n].ack = htons(seqno);
	gacks[n].gap = 0;
	n++;
	return n;
}
/* tipc_build_gap_ack_blks - build Gap ACK blocks
 * @l: tipc unicast link
 * @hdr: the tipc message buffer to store the Gap ACK blocks after built
 *
 * The function builds Gap ACK blocks for both the unicast & broadcast
 * receiver links of a certain peer. The buffer, once built, has the network
 * data format as found at the struct tipc_gap_ack_blks definition.
 *
 * Return: the actual allocated memory size
 */
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
{
	struct tipc_link *bcl = l->bc_rcvlink;
	struct tipc_gap_ack_blks *ga;
	u16 len;

	ga = (struct tipc_gap_ack_blks *)msg_data(hdr);

	/* Start with broadcast link first */
	tipc_bcast_lock(bcl->net);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
	ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
	tipc_bcast_unlock(bcl->net);

	/* Now for unicast link, but an explicit NACK only (???) */
	ga->ugack_cnt = (msg_seq_gap(hdr)) ?
			__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;

	/* Total len */
	len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
	ga->len = htons(len);
	return len;
}
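/* Illustrative sketch (ours): a Gap ACK block is an (ack, gap) pair meaning
 * "everything up to ack was received; the next gap packets are missing".
 * Building blocks from a sorted deferred queue of sequence numbers, as
 * __tipc_build_gap_ack_blks() does from deferdq; compiled out.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t deferdq[] = { 10, 11, 14, 15, 20 };	/* out-of-seq pkts */
	uint16_t expect = deferdq[0];
	int i;

	for (i = 0; i < 5; i++) {
		uint16_t seqno = deferdq[i];

		if (seqno > expect)	/* hole: emit one block */
			printf("ack=%u gap=%u\n", expect - 1, seqno - expect);
		expect = seqno + 1;
	}
	printf("ack=%u gap=0\n", expect - 1);	/* closing block */
	/* prints: ack=11 gap=2, ack=15 gap=4, ack=20 gap=0 */
	return 0;
}
#endif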
/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
 *			       acked packets, also doing retransmissions if
 *			       gaps found
 * @l: tipc link with transmq queue to be advanced
 * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
 * @acked: seqno of last packet acked by peer without any gaps before
 * @gap: # of gap packets
 * @ga: buffer pointer to Gap ACK blocks from peer
 * @xmitq: queue for accumulating the retransmitted packets if any
 * @retransmitted: returned boolean value if a retransmission is really issued
 * @rc: returned code e.g. TIPC_LINK_DOWN_EVT if a repeated retransmit failure
 *      happens (unlikely case)
 *
 * Return: the number of packets released from the link transmq
 */
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc)
{
	struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
	struct tipc_gap_ack *gacks = NULL;
	struct sk_buff *skb, *_skb, *tmp;
	struct tipc_msg *hdr;
	u32 qlen = skb_queue_len(&l->transmq);
	u16 nacked = acked, ngap = gap, gack_cnt = 0;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno, n = 0;
	u16 end = r->acked, start = end, offset = r->last_gap;
	u16 si = (last_ga) ? last_ga->start_index : 0;
	bool is_uc = !link_is_bc_sndlink(l);
	bool bc_has_acked = false;

	trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);

	/* Determine Gap ACK blocks if any for the particular link */
	if (ga && is_uc) {
		/* Get the Gap ACKs, uc part */
		gack_cnt = ga->ugack_cnt;
		gacks = &ga->gacks[ga->bgack_cnt];
	} else if (ga) {
		/* Copy the Gap ACKs, bc part, for later renewal if needed */
		this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
				  GFP_ATOMIC);
		if (likely(this_ga)) {
			this_ga->start_index = 0;
			/* Start with the bc Gap ACKs */
			gack_cnt = this_ga->bgack_cnt;
			gacks = &this_ga->gacks[0];
		} else {
			/* Hmm, we can get in trouble..., simply ignore it */
			pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
		}
	}

	/* Advance the link transmq */
	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		seqno = buf_seqno(skb);

next_gap_ack:
		if (less_eq(seqno, nacked)) {
			if (is_uc)
				goto release;
			/* Skip packets peer has already acked */
			if (!more(seqno, r->acked))
				continue;
			/* Get the next of last Gap ACK blocks */
			while (more(seqno, end)) {
				if (!last_ga || si >= last_ga->bgack_cnt)
					break;
				start = end + offset + 1;
				end = ntohs(last_ga->gacks[si].ack);
				offset = ntohs(last_ga->gacks[si].gap);
				si++;
				WARN_ONCE(more(start, end) ||
					  (!offset &&
					   si < last_ga->bgack_cnt) ||
					  si > MAX_GAP_ACK_BLKS,
					  "Corrupted Gap ACK: %d %d %d %d %d\n",
					  start, end, offset, si,
					  last_ga->bgack_cnt);
			}
			/* Check against the last Gap ACK block */
			if (in_range(seqno, start, end))
				continue;
			/* Update/release the packet peer is acking */
			bc_has_acked = true;
			if (--TIPC_SKB_CB(skb)->ackers)
				continue;
release:
			/* release skb */
			__skb_unlink(skb, &l->transmq);
			kfree_skb(skb);
		} else if (less_eq(seqno, nacked + ngap)) {
			/* First gap: check if repeated retrans failures? */
			if (unlikely(seqno == acked + 1 &&
				     link_retransmit_failure(l, r, rc))) {
				/* Ignore this bc Gap ACKs if any */
				kfree(this_ga);
				this_ga = NULL;
				break;
			}
			/* retransmit skb if unrestricted */
			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
				continue;
			tipc_link_set_skb_retransmit_time(skb, l);
			_skb = pskb_copy(skb, GFP_ATOMIC);
			if (!_skb)
				continue;
			hdr = buf_msg(_skb);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb->priority = TC_PRIO_CONTROL;
			__skb_queue_tail(xmitq, _skb);
			l->stats.retransmitted++;
			if (!is_uc)
				r->stats.retransmitted++;
			*retransmitted = true;
			/* Increase actual retrans counter & mark first time */
			if (!TIPC_SKB_CB(skb)->retr_cnt++)
				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
		} else {
			/* retry with Gap ACK blocks if any */
			if (n >= gack_cnt)
				break;
			nacked = ntohs(gacks[n].ack);
			ngap = ntohs(gacks[n].gap);
			n++;
			goto next_gap_ack;
		}
	}

	/* Renew last Gap ACK blocks for bc if needed */
	if (bc_has_acked) {
		if (this_ga) {
			kfree(last_ga);
			r->last_ga = this_ga;
			r->last_gap = 0;
		} else if (last_ga) {
			if (less(acked, start)) {
				si--;
				offset = start - acked - 1;
			} else if (less(acked, end)) {
				acked = end;
			}
			if (si < last_ga->bgack_cnt) {
				last_ga->start_index = si;
				r->last_gap = offset;
			} else {
				kfree(last_ga);
				r->last_ga = NULL;
				r->last_gap = 0;
			}
		} else {
			r->last_gap = 0;
		}
		r->acked = acked;
	} else {
		kfree(this_ga);
	}

	return qlen - skb_queue_len(&l->transmq);
}
/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;

		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
		l->snd_nxt = l->rcv_nxt;
		return TIPC_LINK_SND_STATE;
	}
	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}
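/* Illustrative sketch (ours): the XOR test above staggers broadcast acks:
 * only the node whose low address nibble "matches" the current rcv_nxt
 * sends one, so at most one in 16 receivers acks any given packet and ack
 * storms toward the sender are avoided. Compiled out.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t own_addr = 0x01001003;	/* example node address */
	uint16_t rcv_nxt;

	for (rcv_nxt = 100; rcv_nxt < 120; rcv_nxt++)
		if (((rcv_nxt ^ own_addr) & 0xf) == 0xf)
			printf("this node acks at rcv_nxt %u\n", rcv_nxt);
	return 0;	/* fires once in any window of 16 packets */
}
#endif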
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}
/* tipc_link_build_nack_msg: prepare link nack message for transmission
 *
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;
	struct sk_buff_head *dfq = &l->deferdq;
	u32 defq_len = skb_queue_len(dfq);
	int match1, match2;

	if (link_is_bc_rcvlink(l)) {
		match1 = def_cnt & 0xf;
		match2 = tipc_own_addr(l->net) & 0xf;
		if (match1 == match2)
			return TIPC_LINK_SND_STATE;
		return 0;
	}

	if (defq_len >= 3 && !((defq_len - 3) % 16)) {
		u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
					  rcvgap, 0, 0, xmitq);
	}
	return 0;
}
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr = buf_msg(skb);
	u16 seqno, rcv_nxt, win_lim;
	int released = 0;
	int rc = 0;

	/* Verify and update link state */
	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
		return tipc_link_proto_rcv(l, skb, xmitq);

	/* Don't send probe at next timeout expiration */
	l->silent_intv_cnt = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			kfree_skb(skb);
			break;
		}

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			break;
		}
		released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
						      NULL, NULL, NULL, NULL);

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			if (!__tipc_skb_queue_sorted(defq, seqno, skb))
				l->stats.duplicates++;
			rc |= tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_pkts++;

		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
		else if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
			break;
	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));

	/* Forward queues and wake up waiting users */
	if (released) {
		tipc_link_update_cwin(l, released, 0);
		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
	return rc;
}
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct tipc_mon_state *mstate = &l->mon_state;
	struct sk_buff_head *dfq = &l->deferdq;
	struct tipc_link *bcl = l->bc_rcvlink;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool node_up = link_is_up(bcl);
	u16 glen = 0, bc_rcvgap = 0;
	int dlen = 0;
	void *data;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if ((probe || probe_reply) && !skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_ack_invalid(hdr, !node_up);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
			msg_set_seqno(hdr, l->snd_nxt_state++);
		msg_set_seq_gap(hdr, rcvgap);
		bc_rcvgap = link_bc_rcv_gap(bcl);
		msg_set_bc_gap(hdr, bc_rcvgap);
		msg_set_probe(hdr, probe);
		msg_set_is_keepalive(hdr, probe || probe_reply);
		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
			glen = tipc_build_gap_ack_blks(l, hdr);
		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
		skb_trim(skb, INT_H_SIZE + glen + dlen);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		if (mtyp == ACTIVATE_MSG) {
			msg_set_dest_session_valid(hdr, 1);
			msg_set_dest_session(hdr, l->peer_session);
		}
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	if (bc_rcvgap)
		bcl->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
	trace_tipc_proto_build(skb, false, l->name);
}
void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 onode = tipc_own_addr(l->net);
	struct tipc_msg *hdr, *ihdr;
	struct sk_buff_head tnlq;
	struct sk_buff *skb;
	u32 dnode = l->addr;

	__skb_queue_head_init(&tnlq);
	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
			      INT_H_SIZE, BASIC_H_SIZE,
			      dnode, onode, 0, 0, 0);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}

	hdr = buf_msg(skb);
	msg_set_msgcnt(hdr, 1);
	msg_set_bearer_id(hdr, l->peer_bearer_id);

	ihdr = (struct tipc_msg *)msg_data(hdr);
	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
		      BASIC_H_SIZE, dnode);
	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
	__skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, xmitq);
}
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq, frags;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;
	bool pktcnt_need_update = false;
	u16 syncpt;
	int rc;

	if (!tnl)
		return;

	__skb_queue_head_init(&tnlq);
	/* Link Synching:
	 * From now on, send only one single ("dummy") SYNCH message
	 * to peer. The SYNCH message does not contain any data, just
	 * a header conveying the synch point to the peer.
	 */
	if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
		tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
					 INT_H_SIZE, 0, l->addr,
					 tipc_own_addr(l->net),
					 0, 0, 0);
		if (!tnlskb) {
			pr_warn("%sunable to create dummy SYNCH_MSG\n",
				link_co_err);
			return;
		}

		hdr = buf_msg(tnlskb);
		syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
		msg_set_syncpt(hdr, syncpt);
		msg_set_bearer_id(hdr, l->peer_bearer_id);
		__skb_queue_tail(&tnlq, tnlskb);
		tipc_link_xmit(tnl, &tnlq, xmitq);
		return;
	}

	__skb_queue_head_init(&tmpxq);
	__skb_queue_head_init(&frags);
	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	__skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	if (mtyp == SYNCH_MSG)
		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
	else
		pktcnt = skb_queue_len(&l->transmq);
	pktcnt += skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);

		/* Tunnel link MTU is not large enough? This could be
		 * due to:
		 * 1) Link MTU has just changed or set differently;
		 * 2) Or FAILOVER on the top of a SYNCH message
		 *
		 * The 2nd case should not happen if peer supports
		 * TIPC_TUNNEL_ENHANCED
		 */
		if (pktlen > tnl->mtu - INT_H_SIZE) {
			if (mtyp == FAILOVER_MSG &&
			    (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
						       &frags);
				if (rc) {
					pr_warn("%sunable to frag msg: rc %d\n",
						link_co_err, rc);
					return;
				}
				pktcnt += skb_queue_len(&frags) - 1;
				pktcnt_need_update = true;
				skb_queue_splice_tail_init(&frags, &tnlq);
				continue;
			}
			/* Unluckily, peer doesn't have TIPC_TUNNEL_ENHANCED
			 * => Just warn it and return!
			 */
			pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
					    link_co_err, msg_user(hdr),
					    msg_type(hdr), msg_size(hdr));
			return;
		}

		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	if (pktcnt_need_update)
		skb_queue_walk(&tnlq, skb) {
			hdr = buf_msg(skb);
			msg_set_msgcnt(hdr, pktcnt);
		}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;

		/* Failover the link's deferdq */
		if (unlikely(!skb_queue_empty(fdefq))) {
			pr_warn("Link failover deferdq not empty: %d!\n",
				skb_queue_len(fdefq));
			__skb_queue_purge(fdefq);
		}
		skb_queue_splice_init(&l->deferdq, fdefq);
	}
}
/**
 * tipc_link_failover_prepare() - prepare tnl for link failover
 *
 * This is a special version of the precursor - tipc_link_tnl_prepare(),
 * see the tipc_node_link_failover() for details
 *
 * @l: failover link
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited
 */
void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
				struct sk_buff_head *xmitq)
{
	struct sk_buff_head *fdefq = &tnl->failover_deferdq;

	tipc_link_create_dummy_tnl_msg(tnl, xmitq);

	/* This failover link endpoint was never established before,
	 * so it has not received anything from peer.
	 * Otherwise, it must be a normal failover situation or the
	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
	 * would have to start over from scratch instead.
	 */
	tnl->drop_point = 1;
	tnl->failover_reasm_skb = NULL;

	/* Initiate the link's failover deferdq */
	if (unlikely(!skb_queue_empty(fdefq))) {
		pr_warn("Link failover deferdq not empty: %d!\n",
			skb_queue_len(fdefq));
		__skb_queue_purge(fdefq);
	}
}
/* tipc_link_validate_msg(): validate message against current link state
 * Returns true if message should be accepted, otherwise false
 */
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
	u16 curr_session = l->peer_session;
	u16 session = msg_session(hdr);
	int mtyp = msg_type(hdr);

	if (msg_user(hdr) != LINK_PROTOCOL)
		return true;

	switch (mtyp) {
	case RESET_MSG:
		if (!l->in_session)
			return true;
		/* Accept only RESET with new session number */
		return more(session, curr_session);
	case ACTIVATE_MSG:
		if (!l->in_session)
			return true;
		/* Accept only ACTIVATE with new or current session number */
		return !less(session, curr_session);
	case STATE_MSG:
		/* Accept only STATE with current session number */
		if (!l->in_session)
			return false;
		if (session != curr_session)
			return false;
		/* Extra sanity check */
		if (!link_is_up(l) && msg_ack(hdr))
			return false;
		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
			return true;
		/* Accept only STATE with new sequence number */
		return !less(msg_seqno(hdr), l->rcv_nxt_state);
	default:
		return false;
	}
}
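/* Editor's note - illustrative sketch, not part of the build: the session
 * and sequence checks above rely on circular (mod 2^16) comparison, so a
 * counter that wraps past 65535 still compares as "newer". ex_less() and
 * ex_more() below mirror the semantics of the less()/more() helpers used
 * in this file; all names are local to the example.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static int ex_less(uint16_t left, uint16_t right)
{
	/* right is "after" left if it lies in the half-window ahead */
	return left != right && (uint16_t)(right - left) < 32768u;
}

static int ex_more(uint16_t left, uint16_t right)
{
	return ex_less(right, left);
}

int main(void)
{
	assert(ex_less(100, 200));	/* plain case */
	assert(ex_less(65500, 10));	/* 10 is newer: counter wrapped */
	assert(ex_more(10, 65500));
	assert(!ex_less(7, 7));		/* equal is neither less nor more */
	return 0;
}
#endif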
/* tipc_link_proto_rcv(): receive link level protocol message
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with the lowest numerical id determines
 * the network plane.
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_gap_ack_blks *ga = NULL;
	bool reply = msg_probe(hdr), retransmitted = false;
	u16 dlen = msg_data_sz(hdr), glen = 0;
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 ack = msg_ack(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcvgap = 0;
	int mtyp = msg_type(hdr);
	int rc = 0, released;
	char *if_name;
	void *data;

	trace_tipc_proto_rcv(skb, false, l->name);
	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	skb_linearize(skb);
	hdr = buf_msg(skb);
	data = msg_data(hdr);

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
		goto exit;
	}

	switch (mtyp) {
	case RESET_MSG:
	case ACTIVATE_MSG:
		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
			l->tolerance = peers_tol;
			l->bc_rcvlink->tolerance = peers_tol;
		}
		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* If peer is going down we want full re-establish cycle */
		if (msg_peer_stopping(hdr)) {
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
			break;
		}

		/* If this endpoint was re-created while peer was ESTABLISHING
		 * it doesn't know current session number. Force re-synch.
		 */
		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
		    l->session != msg_dest_session(hdr)) {
			if (less(l->session, msg_dest_session(hdr)))
				l->session = msg_dest_session(hdr) + 1;
			break;
		}

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (mtyp == RESET_MSG || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->in_session = true;
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:
		l->rcv_nxt_state = msg_seqno(hdr) + 1;

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
			l->tolerance = peers_tol;
			l->bc_rcvlink->tolerance = peers_tol;
		}
		/* Update own prio if peer indicates a different value */
		if ((peers_prio != l->priority) &&
		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Receive Gap ACK blocks from peer if any */
		glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);

		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if ((reply || msg_is_keepalive(hdr)) &&
		    more(peers_snd_nxt, rcv_nxt) &&
		    !tipc_link_is_synching(l) &&
		    skb_queue_empty(&l->deferdq))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || reply)
			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
						  rcvgap, 0, 0, xmitq);

		released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
						     &retransmitted, &rc);
		if (gap)
			l->stats.recv_nacks++;
		if (released || retransmitted)
			tipc_link_update_cwin(l, released, retransmitted);
		if (released)
			tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}
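/* Editor's note - illustrative sketch, not part of the build: the receive
 * gap computed above is plain u16 subtraction, which remains correct
 * across sequence-number wraparound as long as the peer is less than half
 * the number space ahead.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint16_t ex_rcvgap(uint16_t peers_snd_nxt, uint16_t rcv_nxt)
{
	return (uint16_t)(peers_snd_nxt - rcv_nxt);
}

int main(void)
{
	assert(ex_rcvgap(110, 100) == 10);	/* peer is 10 pkts ahead */
	assert(ex_rcvgap(5, 65531) == 10);	/* same gap across wrap */
	return 0;
}
#endif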
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}
/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
	tipc_link_xmit(l, &list, xmitq);
}
/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			  struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
	int rc = 0;

	if (!link_is_up(l))
		return rc;

	if (!msg_peer_node_is_up(hdr))
		return rc;

	/* Open when peer acknowledges our bcast init msg (pkt #1) */
	if (msg_ack(hdr))
		l->bc_peer_is_up = true;

	if (!l->bc_peer_is_up)
		return rc;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return rc;

	l->snd_nxt = peers_snd_nxt;
	if (link_bc_rcv_gap(l))
		rc |= TIPC_LINK_SND_STATE;

	/* Return now if sender supports nack via STATE messages */
	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
		return rc;

	/* Otherwise, be backwards compatible */

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return 0;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return 0;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return 0;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
	return 0;
}
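/* Editor's note - illustrative sketch, not part of the build: the
 * backwards-compatible NACK throttling above is a three-state machine
 * (CONDITIONAL -> UNCONDITIONAL -> SUPPRESS). This standalone mirror
 * shows the transitions; all EX_* names are local to the example.
 */
#if 0
#include <assert.h>

enum ex_nack_state {
	EX_BC_NACK_SND_CONDITIONAL,
	EX_BC_NACK_SND_UNCONDITIONAL,
	EX_BC_NACK_SND_SUPPRESS,
};

/* Returns 1 if a NACK should be sent now; updates *state either way */
static int ex_bc_nack_decide(enum ex_nack_state *state, int gap,
			     int gap_below_min_win)
{
	if (!gap) {				/* nothing is missing */
		*state = EX_BC_NACK_SND_CONDITIONAL;
		return 0;
	}
	if (*state == EX_BC_NACK_SND_SUPPRESS) { /* one was sent recently */
		*state = EX_BC_NACK_SND_UNCONDITIONAL;
		return 0;
	}
	if (*state == EX_BC_NACK_SND_CONDITIONAL) {
		*state = EX_BC_NACK_SND_UNCONDITIONAL;
		if (gap_below_min_win)		/* small gap: wait a round */
			return 0;
	}
	*state = EX_BC_NACK_SND_SUPPRESS;	/* send now, suppress next */
	return 1;
}

int main(void)
{
	enum ex_nack_state s = EX_BC_NACK_SND_CONDITIONAL;

	assert(!ex_bc_nack_decide(&s, 1, 1));	/* small gap: deferred once */
	assert(ex_bc_nack_decide(&s, 1, 1));	/* still missing: NACK now */
	assert(!ex_bc_nack_decide(&s, 1, 0));	/* suppressed after send */
	return 0;
}
#endif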
int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
			 struct tipc_gap_ack_blks *ga,
			 struct sk_buff_head *xmitq,
			 struct sk_buff_head *retrq)
{
	struct tipc_link *l = r->bc_sndlink;
	bool unused = false;
	int rc = 0;

	if (!link_is_up(r) || !r->bc_peer_is_up)
		return 0;

	if (gap) {
		l->stats.recv_nacks++;
		r->stats.recv_nacks++;
	}

	if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
		return 0;

	trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
	tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);

	tipc_link_advance_backlog(l, xmitq);
	if (unlikely(!skb_queue_empty(&l->wakeupq)))
		link_prepare_wakeup(l);

	return rc;
}
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
					  xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}
void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
{
	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

	l->min_win = min_win;
	l->ssthresh = max_win;
	l->max_win = max_win;
	l->window = min_win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = min_win * 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = min_win * 4;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = min_win * 6;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
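/* Editor's note - worked example, not part of the build: with min_win = 50
 * the backlog limits above become 100/200/300/400 packets for LOW through
 * CRITICAL importance, while SYSTEM importance is bounded by bulk
 * name-table capacity. The TIPC_MAX_PUBL, mtu and ITEM_SIZE values below
 * are assumptions for the arithmetic only.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned int min_win = 50, mtu = 1500;
	unsigned int max_publ = 65535, item_size = 20;	/* assumed */

	assert(min_win * 2 == 100);	/* TIPC_LOW_IMPORTANCE */
	assert(min_win * 8 == 400);	/* TIPC_CRITICAL_IMPORTANCE */
	/* SYSTEM: how many publication items fit per bulk message */
	assert(max_publ / (mtu / item_size) == 873);
	return 0;
}
#endif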
/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}
static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
					  tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 max_win;

		max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
			return -EINVAL;
	}

	return 0;
}
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
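/* Editor's note - illustrative sketch, not part of the build: the function
 * above follows the standard netlink fill pattern - open the message, open
 * nests, and on any put failure jump to progressively shallower cancel
 * labels so a partially built message never reaches userspace. The ex_*
 * helpers below are hypothetical stand-ins for the genlmsg_ and nla_
 * calls, counting nest depth instead of writing attributes.
 */
#if 0
#include <stdio.h>

struct ex_msg {
	int depth;	/* currently open nests */
	int fail_at;	/* which call should simulate -EMSGSIZE */
	int calls;
};

static int ex_op(struct ex_msg *m)
{
	return ++m->calls != m->fail_at;
}

static int ex_fill(struct ex_msg *m)
{
	if (!ex_op(m))			/* genlmsg_put() failed */
		return -1;
	if (!ex_op(m))			/* nla_nest_start(attrs) failed */
		goto msg_full;
	m->depth++;
	if (!ex_op(m))			/* nla_nest_start(prop) failed */
		goto attr_msg_full;
	m->depth++;
	if (!ex_op(m))			/* nla_put_u32() failed */
		goto prop_msg_full;
	m->depth -= 2;			/* nla_nest_end() x 2 */
	return 0;			/* genlmsg_end() */

prop_msg_full:
	m->depth--;			/* nla_nest_cancel(prop) */
attr_msg_full:
	m->depth--;			/* nla_nest_cancel(attrs) */
msg_full:
	return -1;			/* genlmsg_cancel() */
}

int main(void)
{
	struct ex_msg m = { .depth = 0, .fail_at = 4, .calls = 0 };
	int rc = ex_fill(&m);

	/* put fails mid-way: both nests are cancelled, depth ends at 0 */
	printf("rc=%d depth=%d\n", rc, m.depth);
	return 0;
}
#endif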
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
			struct tipc_link *bcl)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	u32 bc_mode = tipc_bcast_get_mode(net);
	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
		goto prop_msg_full;
	if (bc_mode & BCLINK_MODE_SEL)
		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
				bc_ratio))
			goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (l->bc_rcvlink)
		l->bc_rcvlink->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}
/**
 * tipc_link_dump - dump TIPC link data
 * @l: tipc link to be dumped
 * @dqueues: bitmask selecting which link queues to dump:
 *           - TIPC_DUMP_NONE: don't dump link queues
 *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
 *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
 *           - TIPC_DUMP_DEFERDQ: dump link deferd queue
 *           - TIPC_DUMP_INPUTQ: dump link input queue
 *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
 *           - TIPC_DUMP_ALL: dump all the link queues above
 * @buf: returned buffer of dump data in string format
 */
int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
{
	int i = 0;
	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
	struct sk_buff_head *list;
	struct sk_buff *hskb, *tskb;
	u32 len;

	if (!l) {
		i += scnprintf(buf, sz, "link data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "link data: %x", l->addr);
	i += scnprintf(buf + i, sz - i, " %x", l->state);
	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
	i += scnprintf(buf + i, sz - i, " %u", l->session);
	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", l->acked);

	list = &l->transmq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->deferdq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->backlogq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = l->inputq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	if (dqueues & TIPC_DUMP_TRANSMQ) {
		i += scnprintf(buf + i, sz - i, "transmq: ");
		i += tipc_list_dump(&l->transmq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_BACKLOGQ) {
		i += scnprintf(buf + i, sz - i,
			       "backlogq: <%u %u %u %u %u>, ",
			       l->backlog[TIPC_LOW_IMPORTANCE].len,
			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
		i += tipc_list_dump(&l->backlogq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_DEFERDQ) {
		i += scnprintf(buf + i, sz - i, "deferdq: ");
		i += tipc_list_dump(&l->deferdq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_INPUTQ) {
		i += scnprintf(buf + i, sz - i, "inputq: ");
		i += tipc_list_dump(l->inputq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_WAKEUP) {
		i += scnprintf(buf + i, sz - i, "wakeup: ");
		i += tipc_list_dump(&l->wakeupq, false, buf + i);
	}

	return i;
}