/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define STARTING_EVT    856384768	/* link processing trigger */
#define TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
#define TIMEOUT_EVT     560817u	/* link timer expired */

/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf);
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
static int  tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
				 struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static int  link_send_sections_long(struct tipc_port *sender,
				    struct iovec const *msg_sect,
				    unsigned int len, u32 destnode);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
static void tipc_link_send_sync(struct tipc_link *l);
static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);

/*
 * Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
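
/* Worked example (for illustration): align(29) == 32 and align(32) == 32,
 * i.e. a size is rounded up to the next multiple of four octets.
 */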

static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	u32 max_pkt;

	max_pkt = (l_ptr->b_ptr->mtu & ~3);
	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

static u32 link_next_sent(struct tipc_link *l_ptr)
{
	if (l_ptr->next_out)
		return buf_seqno(l_ptr->next_out);
	return mod(l_ptr->next_out_no);
}

static u32 link_last_sent(struct tipc_link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 */
static void link_timeout(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */

	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}

static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout,
		     (unsigned long)l_ptr);

	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}


void tipc_link_delete_list(unsigned int bearer_id)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		spin_lock_bh(&n_ptr->lock);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr) {
			tipc_link_reset(l_ptr);
			tipc_node_detach_link(n_ptr, l_ptr);
			spin_unlock_bh(&n_ptr->lock);

			/* Nobody else can access this link now: */
			del_timer_sync(&l_ptr->timer);
			kfree(l_ptr);
			continue;
		}
		spin_unlock_bh(&n_ptr->lock);
	}
}

/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 */
static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!p_ptr->wakeup)
			goto exit;
		if (!list_empty(&p_ptr->wait_list))
			goto exit;
		p_ptr->congested = 1;
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	return -ELINKCONG;
}

void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_port *temp_p_ptr;
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;
	if (win <= 0)
		return;
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		p_ptr->congested = 0;
		p_ptr->wakeup(p_ptr);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}

/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 */
static void link_release_outqueue(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->first_out);
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_head);
	l_ptr->reasm_head = NULL;
	l_ptr->reasm_tail = NULL;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->oldest_deferred_in);
	kfree_skb_list(l_ptr->first_out);
	tipc_link_reset_fragments(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
	link_release_outqueue(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	kfree_skb_list(l_ptr->oldest_deferred_in);
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

void tipc_link_reset_list(unsigned int bearer_id)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		spin_lock_bh(&n_ptr->lock);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
		spin_unlock_bh(&n_ptr->lock);
	}
}

static void link_activate(struct tipc_link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;		/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
							 1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
							 0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_send_sync(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_send_sync(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}

/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.
 */
static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;

	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	kfree_skb(buf);
	l_ptr->stats.sent_bundled++;
	return 1;
}
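
/* Worked example (for illustration): if the bundle currently holds 70 bytes,
 * to_pos = align(70) = 72 and pad = 2, so a 100-byte message is copied in at
 * offset 72 and the bundle grows to 172 bytes, provided the skb has tailroom
 * for pad + size and the result still fits within max_pkt.
 */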

static void link_add_to_outqueue(struct tipc_link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);
	u32 seqno = mod(l_ptr->next_out_no++);

	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;

	l_ptr->out_queue_size++;
	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}
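
/* Worked example (for illustration): with next_in_no == 6 and next_out_no == 6,
 * ack = 5 and seqno = 6, so header word 2 becomes 0x00050006 (the acknowledged
 * sequence number in the upper 16 bits, the packet's own sequence number in
 * the lower 16 bits).
 */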

static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
				       struct sk_buff *buf_chain,
				       u32 long_msgno)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!l_ptr->next_out)
		l_ptr->next_out = buf_chain;
	while (buf_chain) {
		buf = buf_chain;
		buf_chain = buf_chain->next;

		msg = buf_msg(buf);
		msg_set_long_msgno(msg, long_msgno);
		link_add_to_outqueue(l_ptr, buf, msg);
	}
}

/*
 * tipc_link_send_buf() is the 'full path' for messages, called from
 * inside TIPC when the 'fast path' in tipc_send_buf
 * has failed, and from link_send()
 */
int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 dsz = msg_data_sz(msg);
	u32 queue_size = l_ptr->out_queue_size;
	u32 imp = tipc_msg_tot_importance(msg);
	u32 queue_limit = l_ptr->queue_limit[imp];
	u32 max_packet = l_ptr->max_pkt;

	/* Match msg importance against queue limits: */
	if (unlikely(queue_size >= queue_limit)) {
		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
			link_schedule_port(l_ptr, msg_origport(msg), size);
			kfree_skb(buf);
			return -ELINKCONG;
		}
		kfree_skb(buf);
		if (imp > CONN_MANAGER) {
			pr_warn("%s<%s>, send queue full", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
		}
		return dsz;
	}

	/* Fragmentation needed ? */
	if (size > max_packet)
		return link_send_long_buf(l_ptr, buf);

	/* Packet can be queued or sent. */
	if (likely(!link_congested(l_ptr))) {
		link_add_to_outqueue(l_ptr, buf, msg);

		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		return dsz;
	}
	/* Congestion: can message be bundled ? */
	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
	    (msg_user(msg) != MSG_FRAGMENTER)) {

		/* Try adding message to an existing bundle */
		if (l_ptr->next_out &&
		    link_bundle_buf(l_ptr, l_ptr->last_out, buf))
			return dsz;

		/* Try creating a new bundle */
		if (size <= max_packet * 2 / 3) {
			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
			struct tipc_msg bundler_hdr;

			if (bundler) {
				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
					      INT_H_SIZE, l_ptr->addr);
				skb_copy_to_linear_data(bundler, &bundler_hdr,
							INT_H_SIZE);
				skb_trim(bundler, INT_H_SIZE);
				link_bundle_buf(l_ptr, bundler, buf);
				buf = bundler;
				msg = buf_msg(buf);
				l_ptr->stats.sent_bundles++;
			}
		}
	}
	if (!l_ptr->next_out)
		l_ptr->next_out = buf;
	link_add_to_outqueue(l_ptr, buf, msg);
	return dsz;
}

/*
 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
 * not been selected yet, and the owner node is not locked
 * Called by TIPC internal users, e.g. the name distributor
 */
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int res = -ELINKCONG;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = tipc_link_send_buf(l_ptr, buf);
		else
			kfree_skb(buf);
		tipc_node_unlock(n_ptr);
	} else {
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}

/*
 * tipc_link_send_sync - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_send_sync(struct tipc_link *l)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (!buf)
		return;

	msg = buf_msg(buf);
	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
	msg_set_last_bcast(msg, l->owner->bclink.acked);
	link_add_chain_to_outqueue(l, buf, 0);
	tipc_link_push_queue(l);
}

/*
 * tipc_link_recv_sync - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_send_names - send name table entries to new neighbor
 *
 * Send routine for bulk delivery of name table messages when contact
 * with a new neighbor occurs. No link congestion checking is performed
 * because name table messages *must* be delivered. The messages must be
 * small enough not to require fragmentation.
 * Called without any locks held.
 */
void tipc_link_send_names(struct list_head *message_list, u32 dest)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct sk_buff *temp_buf;

	if (list_empty(message_list))
		return;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[0];
		if (l_ptr) {
			/* convert circular list to linear list */
			((struct sk_buff *)message_list->prev)->next = NULL;
			link_add_chain_to_outqueue(l_ptr,
				(struct sk_buff *)message_list->next, 0);
			tipc_link_push_queue(l_ptr);
			INIT_LIST_HEAD(message_list);
		}
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);

	/* discard the messages if they couldn't be sent */
	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
		list_del((struct list_head *)buf);
		kfree_skb(buf);
	}
}

/*
 * link_send_buf_fast: Entry for data messages where the
 * destination link is known and the header is complete,
 * including the total message length. Very time critical.
 * Link is locked. Returns user data length.
 */
static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
			      u32 *used_max_pkt)
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = msg_data_sz(msg);

	if (likely(!link_congested(l_ptr))) {
		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
			link_add_to_outqueue(l_ptr, buf, msg);
			tipc_bearer_send(l_ptr->b_ptr, buf,
					 &l_ptr->media_addr);
			l_ptr->unacked_window = 0;
			return res;
		}
		else
			*used_max_pkt = l_ptr->max_pkt;
	}
	return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
}

/*
 * tipc_link_send_sections_fast: Entry for messages where the
 * destination processor is known and the header is complete,
 * except for total message length.
 * Returns user data length or errno.
 */
int tipc_link_send_sections_fast(struct tipc_port *sender,
				 struct iovec const *msg_sect,
				 unsigned int len, u32 destaddr)
{
	struct tipc_msg *hdr = &sender->phdr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct tipc_node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 */
	res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
	/* Exit if build request was invalid */
	if (unlikely(res < 0))
		return res;

	read_lock_bh(&tipc_net_lock);
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			if (likely(buf)) {
				res = link_send_buf_fast(l_ptr, buf,
							 &sender->max_pkt);
exit:
				tipc_node_unlock(node);
				read_unlock_bh(&tipc_net_lock);
				return res;
			}

			/* Exit if link (or bearer) is congested */
			if (link_congested(l_ptr)) {
				res = link_schedule_port(l_ptr,
							 sender->ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			read_unlock_bh(&tipc_net_lock);


			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;

			return link_send_sections_long(sender, msg_sect, len,
						       destaddr);
		}
		tipc_node_unlock(node);
	}
	read_unlock_bh(&tipc_net_lock);

	/* Couldn't find a link to the destination node */
	if (buf)
		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	if (res >= 0)
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	return res;
}

/*
 * link_send_sections_long(): Entry for long messages where the
 * destination node is known and the header is complete,
 * including the total message length.
 * Link and bearer congestion status have been checked to be ok,
 * and are ignored if they change.
 *
 * Note that fragments do not use the full link MTU so that they won't have
 * to undergo refragmentation if link changeover causes them to be sent
 * over another link with an additional tunnel header added as prefix.
 * (Refragmentation will still occur if the other link has a smaller MTU.)
 *
 * Returns user data length or errno.
 */
static int link_send_sections_long(struct tipc_port *sender,
				   struct iovec const *msg_sect,
				   unsigned int len, u32 destaddr)
{
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	struct tipc_msg *hdr = &sender->phdr;
	u32 dsz = len;
	u32 max_pkt, fragm_sz, rest;
	struct tipc_msg fragm_hdr;
	struct sk_buff *buf, *buf_chain, *prev;
	u32 fragm_crs, fragm_rest, hsz, sect_rest;
	const unchar __user *sect_crs;
	int curr_sect;
	u32 fragm_no;
	int res = 0;

again:
	fragm_no = 1;
	max_pkt = sender->max_pkt - INT_H_SIZE;
		/* leave room for tunnel header in case of link changeover */
	fragm_sz = max_pkt - INT_H_SIZE;
		/* leave room for fragmentation header in each fragment */
	rest = dsz;
	fragm_crs = 0;
	fragm_rest = 0;
	sect_rest = 0;
	sect_crs = NULL;
	curr_sect = -1;

	/* Prepare reusable fragment header */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(hdr));
	msg_set_size(&fragm_hdr, max_pkt);
	msg_set_fragm_no(&fragm_hdr, 1);

	/* Prepare header of first fragment */
	buf_chain = buf = tipc_buf_acquire(max_pkt);
	if (!buf)
		return -ENOMEM;
	buf->next = NULL;
	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
	hsz = msg_hdr_sz(hdr);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);

	/* Chop up message */
	fragm_crs = INT_H_SIZE + hsz;
	fragm_rest = fragm_sz - hsz;

	do {		/* For all sections */
		u32 sz;

		if (!sect_rest) {
			sect_rest = msg_sect[++curr_sect].iov_len;
			sect_crs = msg_sect[curr_sect].iov_base;
		}

		if (sect_rest < fragm_rest)
			sz = sect_rest;
		else
			sz = fragm_rest;

		if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
			res = -EFAULT;
error:
			kfree_skb_list(buf_chain);
			return res;
		}
		sect_crs += sz;
		sect_rest -= sz;
		fragm_crs += sz;
		fragm_rest -= sz;
		rest -= sz;

		if (!fragm_rest && rest) {

			/* Initiate new fragment: */
			if (rest <= fragm_sz) {
				fragm_sz = rest;
				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
			} else {
				msg_set_type(&fragm_hdr, FRAGMENT);
			}
			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
			prev = buf;
			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
			if (!buf) {
				res = -ENOMEM;
				goto error;
			}

			buf->next = NULL;
			prev->next = buf;
			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
			fragm_crs = INT_H_SIZE;
			fragm_rest = fragm_sz;
		}
	} while (rest > 0);

	/*
	 * Now we have a buffer chain. Select a link and check
	 * that packet size is still OK
	 */
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[sender->ref & 1];
		if (!l_ptr) {
			tipc_node_unlock(node);
			goto reject;
		}
		if (l_ptr->max_pkt < max_pkt) {
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			kfree_skb_list(buf_chain);
			goto again;
		}
	} else {
reject:
		kfree_skb_list(buf_chain);
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	}

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);
	tipc_node_unlock(node);
	return dsz;
}

/*
 * tipc_link_push_packet: Push one unsent packet to the media
 */
static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any, */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = buf_seqno(buf);

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->retransm_queue_head = mod(++r_q_head);
		l_ptr->retransm_queue_size = --r_q_size;
		l_ptr->stats.retransmitted++;
		return 0;
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		kfree_skb(buf);
		l_ptr->proto_msg_queue = NULL;
		return 0;
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = buf_seqno(l_ptr->first_out);

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
			if (msg_user(msg) == MSG_BUNDLER)
				msg_set_type(msg, CLOSED_MSG);
			l_ptr->next_out = buf->next;
			return 0;
		}
	}
	return 1;
}

/*
 * push_queue(): push out the unsent messages of a link where
 * congestion has abated. Node is locked
 */
void tipc_link_push_queue(struct tipc_link *l_ptr)
{
	u32 res;

	do {
		res = tipc_link_push_packet(l_ptr);
	} while (!res);
}

static void link_reset_all(unsigned long addr)
{
	struct tipc_node *n_ptr;
	char addr_string[16];
	u32 i;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find((u32)addr);
	if (!n_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return;	/* node no longer exists */
	}

	tipc_node_lock(n_ptr);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, n_ptr->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (n_ptr->links[i]) {
			link_print(n_ptr->links[i], "Resetting link\n");
			tipc_link_reset(n_ptr->links[i]);
		}
	}

	tipc_node_unlock(n_ptr);
	read_unlock_bh(&tipc_net_lock);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);

		tipc_node_unlock(n_ptr);

		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!buf)
		return;

	msg = buf_msg(buf);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, buf);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	while (retransmits && (buf != l_ptr->next_out) && buf) {
		msg = buf_msg(buf);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		buf = buf->next;
		retransmits--;
		l_ptr->stats.retransmitted++;
	}

	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}

/**
 * link_insert_deferred_queue - insert deferred messages back into receive chain
 */
static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
						  struct sk_buff *buf)
{
	u32 seq_no;

	if (l_ptr->oldest_deferred_in == NULL)
		return buf;

	seq_no = buf_seqno(l_ptr->oldest_deferred_in);
	if (seq_no == mod(l_ptr->next_in_no)) {
		l_ptr->newest_deferred_in->next = buf;
		buf = l_ptr->oldest_deferred_in;
		l_ptr->oldest_deferred_in = NULL;
		l_ptr->deferred_inqueue_sz = 0;
	}
	return buf;
}

/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	static u32 min_data_hdr_size[8] = {
		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
		};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	return pskb_may_pull(buf, hdr_size);
}

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @head: pointer to message buffer chain
 * @tb_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	read_lock_bh(&tipc_net_lock);
	while (head) {
		struct tipc_node *n_ptr;
		struct tipc_link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;

		head = head->next;
		buf->next = NULL;

		/* Ensure bearer is still enabled */
		if (unlikely(!b_ptr->active))
			goto discard;

		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(buf)))
			goto discard;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(buf)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_recv_msg(buf, b_ptr);
			else
				tipc_bclink_recv_pkt(buf);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock_discard;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		     msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->block_setup &= ~WAIT_PEER_DOWN;

		if (n_ptr->block_setup)
			goto unlock_discard;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.recv_permitted)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(buf_seqno(crs), ackd)) {
			struct sk_buff *next = crs->next;

			kfree_skb(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Now (finally!) process the incoming message */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				link_recv_proto_msg(l_ptr, buf);
				head = link_insert_deferred_queue(l_ptr, head);
				tipc_node_unlock(n_ptr);
				continue;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				buf->next = head;
				head = buf;
				tipc_node_unlock(n_ptr);
				continue;
			}
			goto unlock_discard;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		l_ptr->next_in_no++;
		if (unlikely(l_ptr->oldest_deferred_in))
			head = link_insert_deferred_queue(l_ptr, head);
deliver:
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(n_ptr);
			tipc_port_recv_msg(buf);
			continue;
		}
		switch (msg_user(msg)) {
		int ret;
		case MSG_BUNDLER:
			l_ptr->stats.recv_bundles++;
			l_ptr->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(n_ptr);
			tipc_link_recv_bundle(buf);
			continue;
		case NAME_DISTRIBUTOR:
			n_ptr->bclink.recv_permitted = true;
			tipc_node_unlock(n_ptr);
			tipc_named_recv(buf);
			continue;
		case BCAST_PROTOCOL:
			tipc_link_recv_sync(n_ptr, buf);
			tipc_node_unlock(n_ptr);
			continue;
		case CONN_MANAGER:
			tipc_node_unlock(n_ptr);
			tipc_port_recv_proto_msg(buf);
			continue;
		case MSG_FRAGMENTER:
			l_ptr->stats.recv_fragments++;
			ret = tipc_link_frag_rcv(&l_ptr->reasm_head,
						 &l_ptr->reasm_tail,
						 &buf);
			if (ret == LINK_REASM_COMPLETE) {
				l_ptr->stats.recv_fragmented++;
				msg = buf_msg(buf);
				goto deliver;
			}
			if (ret == LINK_REASM_ERROR)
				tipc_link_reset(l_ptr);
			tipc_node_unlock(n_ptr);
			continue;
		case CHANGEOVER_PROTOCOL:
			if (!tipc_link_tunnel_rcv(&l_ptr, &buf))
				break;
			msg = buf_msg(buf);
			seq_no = msg_seqno(msg);
			goto deliver;
		default:
			kfree_skb(buf);
			buf = NULL;
			break;
		}
		tipc_node_unlock(n_ptr);
		tipc_net_route_msg(buf);
		continue;
unlock_discard:

		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
}

2c53040f 1622/**
8809b255
AS
1623 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1624 *
1625 * Returns increase in queue length (i.e. 0 or 1)
b97bf3fd 1626 */
8809b255 1627u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
4323add6 1628 struct sk_buff *buf)
b97bf3fd 1629{
8809b255
AS
1630 struct sk_buff *queue_buf;
1631 struct sk_buff **prev;
f905730c 1632 u32 seq_no = buf_seqno(buf);
b97bf3fd
PL
1633
1634 buf->next = NULL;
1635
1636 /* Empty queue ? */
1637 if (*head == NULL) {
1638 *head = *tail = buf;
1639 return 1;
1640 }
1641
1642 /* Last ? */
f905730c 1643 if (less(buf_seqno(*tail), seq_no)) {
b97bf3fd
PL
1644 (*tail)->next = buf;
1645 *tail = buf;
1646 return 1;
1647 }
1648
8809b255
AS
1649 /* Locate insertion point in queue, then insert; discard if duplicate */
1650 prev = head;
1651 queue_buf = *head;
1652 for (;;) {
1653 u32 curr_seqno = buf_seqno(queue_buf);
b97bf3fd 1654
8809b255 1655 if (seq_no == curr_seqno) {
5f6d9123 1656 kfree_skb(buf);
8809b255 1657 return 0;
b97bf3fd 1658 }
8809b255
AS
1659
1660 if (less(seq_no, curr_seqno))
b97bf3fd 1661 break;
b97bf3fd 1662
1663 prev = &queue_buf->next;
1664 queue_buf = queue_buf->next;
1665 }
b97bf3fd 1666
1667 buf->next = queue_buf;
1668 *prev = buf;
1669 return 1;
1670}
1671
8809b255 1672/*
1673 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1674 */
a18c4bc3 1675static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1676 struct sk_buff *buf)
1677{
f905730c 1678 u32 seq_no = buf_seqno(buf);
1679
1680 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1681 link_recv_proto_msg(l_ptr, buf);
1682 return;
1683 }
1684
b97bf3fd 1685 /* Record OOS packet arrival (force mismatch on next timeout) */
1686 l_ptr->checkpoint--;
1687
c4307285 1688 /*
1689 * Discard packet if a duplicate; otherwise add it to deferred queue
1690 * and notify peer of gap as per protocol specification
1691 */
1692 if (less(seq_no, mod(l_ptr->next_in_no))) {
1693 l_ptr->stats.duplicates++;
5f6d9123 1694 kfree_skb(buf);
1695 return;
1696 }
1697
1698 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1699 &l_ptr->newest_deferred_in, buf)) {
1700 l_ptr->deferred_inqueue_sz++;
1701 l_ptr->stats.deferred_recv++;
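		/* Remind the peer of the gap, but only on every 16th deferral */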
1702 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
4323add6 1703 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1704 } else
1705 l_ptr->stats.duplicates++;
1706}
1707
1708/*
1709 * Send protocol message to the other endpoint.
1710 */
a18c4bc3 1711void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1712 int probe_msg, u32 gap, u32 tolerance,
1713 u32 priority, u32 ack_mtu)
b97bf3fd 1714{
1fc54d8f 1715 struct sk_buff *buf = NULL;
b97bf3fd 1716 struct tipc_msg *msg = l_ptr->pmsg;
c4307285 1717 u32 msg_size = sizeof(l_ptr->proto_msg);
75f0aa49 1718 int r_flag;
b97bf3fd 1719
92d2c905 1720 /* Discard any previous message that was deferred due to congestion */
92d2c905 1721 if (l_ptr->proto_msg_queue) {
5f6d9123 1722 kfree_skb(l_ptr->proto_msg_queue);
1723 l_ptr->proto_msg_queue = NULL;
1724 }
1725
1726 /* Don't send protocol message during link changeover */
1727 if (l_ptr->exp_msg_count)
b97bf3fd 1728 return;
1729
1730 /* Abort non-RESET send if communication with node is prohibited */
1731 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1732 return;
1733
92d2c905 1734 /* Create protocol message with "out-of-sequence" sequence number */
1735 msg_set_type(msg, msg_typ);
1736 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
7a54d4a9 1737 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
4323add6 1738 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1739
1740 if (msg_typ == STATE_MSG) {
1741 u32 next_sent = mod(l_ptr->next_out_no);
1742
4323add6 1743 if (!tipc_link_is_up(l_ptr))
1744 return;
1745 if (l_ptr->next_out)
f905730c 1746 next_sent = buf_seqno(l_ptr->next_out);
1747 msg_set_next_sent(msg, next_sent);
1748 if (l_ptr->oldest_deferred_in) {
f905730c 1749 u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
1750 gap = mod(rec - mod(l_ptr->next_in_no));
1751 }
1752 msg_set_seq_gap(msg, gap);
1753 if (gap)
1754 l_ptr->stats.sent_nacks++;
1755 msg_set_link_tolerance(msg, tolerance);
1756 msg_set_linkprio(msg, priority);
1757 msg_set_max_pkt(msg, ack_mtu);
1758 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1759 msg_set_probe(msg, probe_msg != 0);
c4307285 1760 if (probe_msg) {
1761 u32 mtu = l_ptr->max_pkt;
1762
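		/* MTU probing: try a size halfway between the current MTU and
		 * the target (4-byte aligned); after 10 probes without an
		 * increase, lower the target to just below the probed size.
		 */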
c4307285 1763 if ((mtu < l_ptr->max_pkt_target) &&
1764 link_working_working(l_ptr) &&
1765 l_ptr->fsm_msg_cnt) {
1766 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1767 if (l_ptr->max_pkt_probes == 10) {
1768 l_ptr->max_pkt_target = (msg_size - 4);
1769 l_ptr->max_pkt_probes = 0;
b97bf3fd 1770 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
c4307285 1771 }
b97bf3fd 1772 l_ptr->max_pkt_probes++;
c4307285 1773 }
1774
1775 l_ptr->stats.sent_probes++;
c4307285 1776 }
1777 l_ptr->stats.sent_states++;
1778 } else { /* RESET_MSG or ACTIVATE_MSG */
1779 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1780 msg_set_seq_gap(msg, 0);
1781 msg_set_next_sent(msg, 1);
f23d9bf2 1782 msg_set_probe(msg, 0);
1783 msg_set_link_tolerance(msg, l_ptr->tolerance);
1784 msg_set_linkprio(msg, l_ptr->priority);
1785 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1786 }
1787
1788 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1789 msg_set_redundant_link(msg, r_flag);
b97bf3fd 1790 msg_set_linkprio(msg, l_ptr->priority);
92d2c905 1791 msg_set_size(msg, msg_size);
1792
1793 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1794
31e3c3f6 1795 buf = tipc_buf_acquire(msg_size);
1796 if (!buf)
1797 return;
1798
27d7ff46 1799 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
796c75d0 1800 buf->priority = TC_PRIO_CONTROL;
b97bf3fd 1801
3c294cb3 1802 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
92d2c905 1803 l_ptr->unacked_window = 0;
5f6d9123 1804 kfree_skb(buf);
1805}
1806
1807/*
1808 * Receive protocol message:
1809 * Note that the network plane id propagates through the network, and may
1810 * change at any time. The node with the lowest address rules.
b97bf3fd 1811 */
a18c4bc3 1812static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
1813{
1814 u32 rec_gap = 0;
1815 u32 max_pkt_info;
c4307285 1816 u32 max_pkt_ack;
1817 u32 msg_tol;
1818 struct tipc_msg *msg = buf_msg(buf);
1819
1820 /* Discard protocol message during link changeover */
1821 if (l_ptr->exp_msg_count)
1822 goto exit;
1823
1824 /* record unnumbered packet arrival (force mismatch on next timeout) */
1825 l_ptr->checkpoint--;
1826
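	/* The node with the lower address dictates the network plane id */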
1827 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
1828 if (tipc_own_addr > msg_prevnode(msg))
1829 l_ptr->b_ptr->net_plane = msg_net_plane(msg);
1830
b97bf3fd 1831 switch (msg_type(msg)) {
c4307285 1832
b97bf3fd 1833 case RESET_MSG:
1834 if (!link_working_unknown(l_ptr) &&
1835 (l_ptr->peer_session != INVALID_SESSION)) {
1836 if (less_eq(msg_session(msg), l_ptr->peer_session))
1837 break; /* duplicate or old reset: ignore */
b97bf3fd 1838 }
1839
1840 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
1841 link_working_unknown(l_ptr))) {
1842 /*
1843 * peer has lost contact -- don't allow peer's links
1844 * to reactivate before we recognize loss & clean up
1845 */
1846 l_ptr->owner->block_setup = WAIT_NODE_DOWN;
1847 }
1848
1849 link_state_event(l_ptr, RESET_MSG);
1850
1851 /* fall thru' */
1852 case ACTIVATE_MSG:
1853 /* Update link settings according to the other endpoint's values */
1854 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
1855
1856 msg_tol = msg_link_tolerance(msg);
1857 if (msg_tol > l_ptr->tolerance)
1858 link_set_supervision_props(l_ptr, msg_tol);
1859
1860 if (msg_linkprio(msg) > l_ptr->priority)
1861 l_ptr->priority = msg_linkprio(msg);
1862
1863 max_pkt_info = msg_max_pkt(msg);
c4307285 1864 if (max_pkt_info) {
1865 if (max_pkt_info < l_ptr->max_pkt_target)
1866 l_ptr->max_pkt_target = max_pkt_info;
1867 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
1868 l_ptr->max_pkt = l_ptr->max_pkt_target;
1869 } else {
c4307285 1870 l_ptr->max_pkt = l_ptr->max_pkt_target;
b97bf3fd 1871 }
b97bf3fd 1872
4d75313c 1873 /* Synchronize broadcast link info, if not done previously */
1874 if (!tipc_node_is_up(l_ptr->owner)) {
1875 l_ptr->owner->bclink.last_sent =
1876 l_ptr->owner->bclink.last_in =
1877 msg_last_bcast(msg);
1878 l_ptr->owner->bclink.oos_state = 0;
1879 }
4d75313c 1880
1881 l_ptr->peer_session = msg_session(msg);
1882 l_ptr->peer_bearer_id = msg_bearer_id(msg);
1883
1884 if (msg_type(msg) == ACTIVATE_MSG)
1885 link_state_event(l_ptr, ACTIVATE_MSG);
1886 break;
1887 case STATE_MSG:
1888
1889 msg_tol = msg_link_tolerance(msg);
1890 if (msg_tol)
b97bf3fd 1891 link_set_supervision_props(l_ptr, msg_tol);
1892
1893 if (msg_linkprio(msg) &&
b97bf3fd 1894 (msg_linkprio(msg) != l_ptr->priority)) {
1895 pr_warn("%s<%s>, priority change %u->%u\n",
1896 link_rst_msg, l_ptr->name, l_ptr->priority,
1897 msg_linkprio(msg));
b97bf3fd 1898 l_ptr->priority = msg_linkprio(msg);
4323add6 1899 tipc_link_reset(l_ptr); /* Enforce change to take effect */
1900 break;
1901 }
1902 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1903 l_ptr->stats.recv_states++;
1904 if (link_reset_unknown(l_ptr))
1905 break;
1906
1907 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
c4307285 1908 rec_gap = mod(msg_next_sent(msg) -
1909 mod(l_ptr->next_in_no));
1910 }
1911
1912 max_pkt_ack = msg_max_pkt(msg);
c4307285 1913 if (max_pkt_ack > l_ptr->max_pkt) {
1914 l_ptr->max_pkt = max_pkt_ack;
1915 l_ptr->max_pkt_probes = 0;
1916 }
1917
1918 max_pkt_ack = 0;
c4307285 1919 if (msg_probe(msg)) {
b97bf3fd 1920 l_ptr->stats.recv_probes++;
a016892c 1921 if (msg_size(msg) > sizeof(l_ptr->proto_msg))
c4307285 1922 max_pkt_ack = msg_size(msg);
c4307285 1923 }
1924
1925 /* Protocol message before retransmits, reduce loss risk */
389dd9bc 1926 if (l_ptr->owner->bclink.recv_permitted)
1927 tipc_bclink_update_link_state(l_ptr->owner,
1928 msg_last_bcast(msg));
1929
1930 if (rec_gap || (msg_probe(msg))) {
1931 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
1932 0, rec_gap, 0, 0, max_pkt_ack);
1933 }
1934 if (msg_seq_gap(msg)) {
b97bf3fd 1935 l_ptr->stats.recv_nacks++;
1936 tipc_link_retransmit(l_ptr, l_ptr->first_out,
1937 msg_seq_gap(msg));
1938 }
1939 break;
1940 }
1941exit:
5f6d9123 1942 kfree_skb(buf);
1943}
1944
1945
1946/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1947 * a different bearer. Owner node is locked.
b97bf3fd 1948 */
1949static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1950 struct tipc_msg *tunnel_hdr,
1951 struct tipc_msg *msg,
1952 u32 selector)
b97bf3fd 1953{
a18c4bc3 1954 struct tipc_link *tunnel;
1955 struct sk_buff *buf;
1956 u32 length = msg_size(msg);
1957
1958 tunnel = l_ptr->owner->active_links[selector & 1];
5392d646 1959 if (!tipc_link_is_up(tunnel)) {
2cf8aa19 1960 pr_warn("%stunnel link no longer available\n", link_co_err);
b97bf3fd 1961 return;
5392d646 1962 }
b97bf3fd 1963 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
31e3c3f6 1964 buf = tipc_buf_acquire(length + INT_H_SIZE);
5392d646 1965 if (!buf) {
2cf8aa19 1966 pr_warn("%sunable to send tunnel msg\n", link_co_err);
b97bf3fd 1967 return;
5392d646 1968 }
1969 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
1970 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
4323add6 1971 tipc_link_send_buf(tunnel, buf);
1972}
1973
1974
1975/* tipc_link_failover_send_queue(): A link has gone down, but a second
1976 * link is still active. We can do failover. Tunnel the failing link's
1977 * whole send queue via the remaining link. This way, we don't lose
1978 * any packets, and sequence order is preserved for subsequent traffic
1979 * sent over the remaining link. Owner node is locked.
b97bf3fd 1980 */
170b3927 1981void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1982{
1983 u32 msgcount = l_ptr->out_queue_size;
1984 struct sk_buff *crs = l_ptr->first_out;
a18c4bc3 1985 struct tipc_link *tunnel = l_ptr->owner->active_links[0];
b97bf3fd 1986 struct tipc_msg tunnel_hdr;
5392d646 1987 int split_bundles;
1988
1989 if (!tunnel)
1990 return;
1991
c68ca7b7 1992 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
75715217 1993 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
1994 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1995 msg_set_msgcnt(&tunnel_hdr, msgcount);
f131072c 1996
1997 if (!l_ptr->first_out) {
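		/* Empty send queue: send only the tunnel header, so the peer
		 * still learns that changeover has started (message count is
		 * zero).
		 */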
1998 struct sk_buff *buf;
1999
31e3c3f6 2000 buf = tipc_buf_acquire(INT_H_SIZE);
b97bf3fd 2001 if (buf) {
27d7ff46 2002 skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
b97bf3fd 2003 msg_set_size(&tunnel_hdr, INT_H_SIZE);
4323add6 2004 tipc_link_send_buf(tunnel, buf);
b97bf3fd 2005 } else {
2006 pr_warn("%sunable to send changeover msg\n",
2007 link_co_err);
2008 }
2009 return;
2010 }
f131072c 2011
c4307285 2012 split_bundles = (l_ptr->owner->active_links[0] !=
2013 l_ptr->owner->active_links[1]);
2014
2015 while (crs) {
2016 struct tipc_msg *msg = buf_msg(crs);
2017
2018 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
b97bf3fd 2019 struct tipc_msg *m = msg_get_wrapped(msg);
0e65967e 2020 unchar *pos = (unchar *)m;
b97bf3fd 2021
d788d805 2022 msgcount = msg_msgcnt(msg);
b97bf3fd 2023 while (msgcount--) {
0e65967e 2024 msg_set_seqno(m, msg_seqno(msg));
2025 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
2026 msg_link_selector(m));
2027 pos += align(msg_size(m));
2028 m = (struct tipc_msg *)pos;
2029 }
2030 } else {
2031 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
2032 msg_link_selector(msg));
2033 }
2034 crs = crs->next;
2035 }
2036}
2037
2038/* tipc_link_dup_send_queue(): A second link has become active. Tunnel a
2039 * duplicate of the first link's send queue via the new link. This way, we
2040 * are guaranteed that currently queued packets from a socket are delivered
2041 * before future traffic from the same socket, even if this is using the
2042 * new link. The last arriving copy of each duplicate packet is dropped at
2043 * the receiving end by the regular protocol check, so packet cardinality
2044 * and sequence order is preserved per sender/receiver socket pair.
2045 * Owner node is locked.
2046 */
2047void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
2048 struct tipc_link *tunnel)
2049{
2050 struct sk_buff *iter;
2051 struct tipc_msg tunnel_hdr;
2052
c68ca7b7 2053 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
75715217 2054 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2055 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2056 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2057 iter = l_ptr->first_out;
2058 while (iter) {
2059 struct sk_buff *outbuf;
2060 struct tipc_msg *msg = buf_msg(iter);
2061 u32 length = msg_size(msg);
2062
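		/* Refresh the queued packet (close any bundle, update acks)
		 * before copying it into the tunnel buffer.
		 */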
2063 if (msg_user(msg) == MSG_BUNDLER)
2064 msg_set_type(msg, CLOSED_MSG);
2065 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
c4307285 2066 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
b97bf3fd 2067 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
31e3c3f6 2068 outbuf = tipc_buf_acquire(length + INT_H_SIZE);
b97bf3fd 2069 if (outbuf == NULL) {
2070 pr_warn("%sunable to send duplicate msg\n",
2071 link_co_err);
2072 return;
2073 }
2074 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
2075 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
2076 length);
2077 tipc_link_send_buf(tunnel, outbuf);
2078 if (!tipc_link_is_up(l_ptr))
2079 return;
2080 iter = iter->next;
2081 }
2082}
2083
2084/**
2085 * buf_extract - extracts embedded TIPC message from another message
2086 * @skb: encapsulating message buffer
2087 * @from_pos: offset to extract from
2088 *
c4307285 2089 * Returns a new message buffer containing an embedded message. The
2090 * encapsulating message itself is left unchanged.
2091 */
2092static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2093{
2094 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2095 u32 size = msg_size(msg);
2096 struct sk_buff *eb;
2097
31e3c3f6 2098 eb = tipc_buf_acquire(size);
b97bf3fd 2099 if (eb)
27d7ff46 2100 skb_copy_to_linear_data(eb, msg, size);
2101 return eb;
2102}
2103
2104
2105
2106/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
2107 * Owner node is locked.
2108 */
2109static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
2110 struct sk_buff *t_buf)
2111{
2112 struct sk_buff *buf;
2113
2114 if (!tipc_link_is_up(l_ptr))
2115 return;
2116
2117 buf = buf_extract(t_buf, INT_H_SIZE);
2118 if (buf == NULL) {
2119 pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
2120 return;
2121 }
2122
2123 /* Add buffer to deferred queue, if applicable: */
2124 link_handle_out_of_seq_msg(l_ptr, buf);
2125}
2126
2127/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
2128 * via other link as result of a failover (ORIGINAL_MSG) or
2129 * a new active link (DUPLICATE_MSG). Failover packets are
2130 * returned to the active link for delivery upwards.
2131 * Owner node is locked.
b97bf3fd 2132 */
2133static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
2134 struct sk_buff **buf)
2135{
2136 struct sk_buff *tunnel_buf = *buf;
a18c4bc3 2137 struct tipc_link *dest_link;
2138 struct tipc_msg *msg;
2139 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2140 u32 msg_typ = msg_type(tunnel_msg);
2141 u32 msg_count = msg_msgcnt(tunnel_msg);
cb4b102f 2142 u32 bearer_id = msg_bearer_id(tunnel_msg);
b97bf3fd 2143
2144 if (bearer_id >= MAX_BEARERS)
2145 goto exit;
1dab3d5a 2146
cb4b102f 2147 dest_link = (*l_ptr)->owner->links[bearer_id];
b29f1428 2148 if (!dest_link)
b97bf3fd 2149 goto exit;
f131072c 2150 if (dest_link == *l_ptr) {
2151 pr_err("Unexpected changeover message on link <%s>\n",
2152 (*l_ptr)->name);
2153 goto exit;
2154 }
2155 *l_ptr = dest_link;
2156 msg = msg_get_wrapped(tunnel_msg);
2157
2158 if (msg_typ == DUPLICATE_MSG) {
2159 tipc_link_dup_rcv(dest_link, tunnel_buf);
2160 goto exit;
2161 }
2162
2163 /* First original message? */
4323add6 2164 if (tipc_link_is_up(dest_link)) {
2165 pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
2166 dest_link->name);
4323add6 2167 tipc_link_reset(dest_link);
2168 dest_link->exp_msg_count = msg_count;
2169 if (!msg_count)
2170 goto exit;
2171 } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2172 dest_link->exp_msg_count = msg_count;
2173 if (!msg_count)
2174 goto exit;
2175 }
2176
2177 /* Receive original message */
b97bf3fd 2178 if (dest_link->exp_msg_count == 0) {
2cf8aa19 2179 pr_warn("%sgot too many tunnelled messages\n", link_co_err);
2180 goto exit;
2181 }
2182 dest_link->exp_msg_count--;
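	/* Drop the tunnelled packet if it was already received before failover */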
2183 if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2184 goto exit;
2185 } else {
2186 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2187 if (*buf != NULL) {
5f6d9123 2188 kfree_skb(tunnel_buf);
2189 return 1;
2190 } else {
2cf8aa19 2191 pr_warn("%soriginal msg dropped\n", link_co_err);
2192 }
2193 }
2194exit:
1fc54d8f 2195 *buf = NULL;
5f6d9123 2196 kfree_skb(tunnel_buf);
2197 return 0;
2198}
2199
2200/*
2201 * Bundler functionality:
2202 */
4323add6 2203void tipc_link_recv_bundle(struct sk_buff *buf)
2204{
2205 u32 msgcount = msg_msgcnt(buf_msg(buf));
2206 u32 pos = INT_H_SIZE;
2207 struct sk_buff *obuf;
2208
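	/* Extract each embedded message and route it individually */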
2209 while (msgcount--) {
2210 obuf = buf_extract(buf, pos);
2211 if (obuf == NULL) {
2cf8aa19 2212 pr_warn("Link unable to unbundle message(s)\n");
a10bd924 2213 break;
3ff50b79 2214 }
b97bf3fd 2215 pos += align(msg_size(buf_msg(obuf)));
4323add6 2216 tipc_net_route_msg(obuf);
b97bf3fd 2217 }
5f6d9123 2218 kfree_skb(buf);
2219}
2220
2221/*
2222 * Fragmentation/defragmentation:
2223 */
2224
c4307285 2225/*
31e3c3f6 2226 * link_send_long_buf: Entry for buffers needing fragmentation.
c4307285 2227 * The buffer is complete, including total message length.
2228 * Returns user data length.
2229 */
a18c4bc3 2230static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
b97bf3fd 2231{
2232 struct sk_buff *buf_chain = NULL;
2233 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2234 struct tipc_msg *inmsg = buf_msg(buf);
2235 struct tipc_msg fragm_hdr;
2236 u32 insize = msg_size(inmsg);
2237 u32 dsz = msg_data_sz(inmsg);
2238 unchar *crs = buf->data;
2239 u32 rest = insize;
15e979da 2240 u32 pack_sz = l_ptr->max_pkt;
b97bf3fd 2241 u32 fragm_sz = pack_sz - INT_H_SIZE;
77561557 2242 u32 fragm_no = 0;
9c396a7b 2243 u32 destaddr;
2244
2245 if (msg_short(inmsg))
2246 destaddr = l_ptr->addr;
2247 else
2248 destaddr = msg_destnode(inmsg);
b97bf3fd 2249
b97bf3fd 2250 /* Prepare reusable fragment header: */
c68ca7b7 2251 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
75715217 2252 INT_H_SIZE, destaddr);
2253
2254 /* Chop up message: */
2255 while (rest > 0) {
2256 struct sk_buff *fragm;
2257
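		/* Final chunk may be shorter; mark it as the last fragment */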
2258 if (rest <= fragm_sz) {
2259 fragm_sz = rest;
2260 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2261 }
31e3c3f6 2262 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
b97bf3fd 2263 if (fragm == NULL) {
5f6d9123 2264 kfree_skb(buf);
d77b3831 2265 kfree_skb_list(buf_chain);
77561557 2266 return -ENOMEM;
2267 }
2268 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2269 fragm_no++;
2270 msg_set_fragm_no(&fragm_hdr, fragm_no);
2271 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2272 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2273 fragm_sz);
2274 buf_chain_tail->next = fragm;
2275 buf_chain_tail = fragm;
b97bf3fd 2276
2277 rest -= fragm_sz;
2278 crs += fragm_sz;
2279 msg_set_type(&fragm_hdr, FRAGMENT);
2280 }
5f6d9123 2281 kfree_skb(buf);
2282
2283 /* Append chain of fragments to send queue & send them */
2284 l_ptr->long_msg_seq_no++;
2285 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2286 l_ptr->stats.sent_fragments += fragm_no;
2287 l_ptr->stats.sent_fragmented++;
2288 tipc_link_push_queue(l_ptr);
2289
2290 return dsz;
2291}
2292
03b92017 2293/* tipc_link_frag_rcv(): Called with node lock on. Returns
2294 * the reassembled buffer if message is complete.
2295 */
2296int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail,
2297 struct sk_buff **fbuf)
b97bf3fd 2298{
2299 struct sk_buff *frag = *fbuf;
2300 struct tipc_msg *msg = buf_msg(frag);
2301 u32 fragid = msg_type(msg);
2302 bool headstolen;
2303 int delta;
2304
2305 skb_pull(frag, msg_hdr_sz(msg));
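	/* First fragment becomes the head of the reassembly chain; later
	 * fragments are coalesced into it or appended to its frag list.
	 */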
2306 if (fragid == FIRST_FRAGMENT) {
2307 if (*head || skb_unclone(frag, GFP_ATOMIC))
2308 goto out_free;
2309 *head = frag;
2310 skb_frag_list_init(*head);
03b92017 2311 *fbuf = NULL;
b97bf3fd 2312 return 0;
2313 } else if (*head &&
2314 skb_try_coalesce(*head, frag, &headstolen, &delta)) {
2315 kfree_skb_partial(frag, headstolen);
2316 } else {
2317 if (!*head)
2318 goto out_free;
2319 if (!skb_has_frag_list(*head))
2320 skb_shinfo(*head)->frag_list = frag;
2321 else
2322 (*tail)->next = frag;
2323 *tail = frag;
2324 (*head)->truesize += frag->truesize;
2325 }
2326 if (fragid == LAST_FRAGMENT) {
2327 *fbuf = *head;
2328 *tail = *head = NULL;
2329 return LINK_REASM_COMPLETE;
b97bf3fd 2330 }
03b92017 2331 *fbuf = NULL;
b97bf3fd 2332 return 0;
2333out_free:
2334 pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
2335 kfree_skb(*fbuf);
03b92017 2336 *fbuf = NULL;
40ba3cdf 2337 return LINK_REASM_ERROR;
2338}
2339
a18c4bc3 2340static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
b97bf3fd 2341{
2342 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2343 return;
2344
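	/* Probe interval is a quarter of the tolerance, capped at 500 ms */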
2345 l_ptr->tolerance = tolerance;
2346 l_ptr->continuity_interval =
2347 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2348 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2349}
2350
a18c4bc3 2351void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2352{
2353 /* Data messages from this node, including FIRST_FRAGM */
2354 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2355 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2356 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2357 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
b97bf3fd 2358 /* Transiting data messages, including FIRST_FRAGM */
2359 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2360 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2361 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2362 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
b97bf3fd 2363 l_ptr->queue_limit[CONN_MANAGER] = 1200;
2364 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2365 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2366 /* FRAGMENT and LAST_FRAGMENT packets */
2367 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2368}
2369
2370/**
2371 * link_find_link - locate link by name
2372 * @name: ptr to link name string
2373 * @node: ptr to area to be filled with ptr to associated node
c4307285 2374 *
4323add6 2375 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
b97bf3fd 2376 * this also prevents link deletion.
c4307285 2377 *
2378 * Returns pointer to link (or NULL if invalid link name).
2379 */
2380static struct tipc_link *link_find_link(const char *name,
2381 struct tipc_node **node)
b97bf3fd 2382{
a18c4bc3 2383 struct tipc_link *l_ptr;
2384 struct tipc_node *n_ptr;
2385 int i;
b97bf3fd 2386
2387 list_for_each_entry(n_ptr, &tipc_node_list, list) {
2388 for (i = 0; i < MAX_BEARERS; i++) {
2389 l_ptr = n_ptr->links[i];
2390 if (l_ptr && !strcmp(l_ptr->name, name))
2391 goto found;
2392 }
2393 }
2394 l_ptr = NULL;
2395 n_ptr = NULL;
2396found:
2397 *node = n_ptr;
2398 return l_ptr;
2399}
2400
2401/**
2402 * link_value_is_valid -- validate proposed link tolerance/priority/window
2403 *
2404 * @cmd: value type (TIPC_CMD_SET_LINK_*)
2405 * @new_value: the new value
2406 *
2407 * Returns 1 if value is within range, 0 if not.
2408 */
2409static int link_value_is_valid(u16 cmd, u32 new_value)
2410{
2411 switch (cmd) {
2412 case TIPC_CMD_SET_LINK_TOL:
2413 return (new_value >= TIPC_MIN_LINK_TOL) &&
2414 (new_value <= TIPC_MAX_LINK_TOL);
2415 case TIPC_CMD_SET_LINK_PRI:
2416 return (new_value <= TIPC_MAX_LINK_PRI);
2417 case TIPC_CMD_SET_LINK_WINDOW:
2418 return (new_value >= TIPC_MIN_LINK_WIN) &&
2419 (new_value <= TIPC_MAX_LINK_WIN);
2420 }
2421 return 0;
2422}
2423
2424/**
2425 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
2426 * @name: ptr to link, bearer, or media name
2427 * @new_value: new value of link, bearer, or media setting
2428 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2429 *
2430 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
2431 *
2432 * Returns 0 if value updated and negative value on error.
2433 */
2434static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
2435{
2436 struct tipc_node *node;
a18c4bc3 2437 struct tipc_link *l_ptr;
5c216e1d 2438 struct tipc_bearer *b_ptr;
358a0d1c 2439 struct tipc_media *m_ptr;
636c0371 2440 int res = 0;
2441
2442 l_ptr = link_find_link(name, &node);
2443 if (l_ptr) {
2444 /*
2445 * acquire node lock for tipc_link_send_proto_msg().
2446 * see "TIPC locking policy" in net.c.
2447 */
2448 tipc_node_lock(node);
2449 switch (cmd) {
2450 case TIPC_CMD_SET_LINK_TOL:
2451 link_set_supervision_props(l_ptr, new_value);
2452 tipc_link_send_proto_msg(l_ptr,
2453 STATE_MSG, 0, 0, new_value, 0, 0);
2454 break;
2455 case TIPC_CMD_SET_LINK_PRI:
2456 l_ptr->priority = new_value;
2457 tipc_link_send_proto_msg(l_ptr,
2458 STATE_MSG, 0, 0, 0, new_value, 0);
2459 break;
2460 case TIPC_CMD_SET_LINK_WINDOW:
2461 tipc_link_set_queue_limits(l_ptr, new_value);
2462 break;
2463 default:
2464 res = -EINVAL;
2465 break;
2466 }
2467 tipc_node_unlock(node);
636c0371 2468 return res;
2469 }
2470
2471 b_ptr = tipc_bearer_find(name);
2472 if (b_ptr) {
2473 switch (cmd) {
2474 case TIPC_CMD_SET_LINK_TOL:
2475 b_ptr->tolerance = new_value;
636c0371 2476 break;
2477 case TIPC_CMD_SET_LINK_PRI:
2478 b_ptr->priority = new_value;
636c0371 2479 break;
2480 case TIPC_CMD_SET_LINK_WINDOW:
2481 b_ptr->window = new_value;
2482 break;
2483 default:
2484 res = -EINVAL;
2485 break;
5c216e1d 2486 }
636c0371 2487 return res;
2488 }
2489
2490 m_ptr = tipc_media_find(name);
2491 if (!m_ptr)
2492 return -ENODEV;
2493 switch (cmd) {
2494 case TIPC_CMD_SET_LINK_TOL:
2495 m_ptr->tolerance = new_value;
636c0371 2496 break;
2497 case TIPC_CMD_SET_LINK_PRI:
2498 m_ptr->priority = new_value;
636c0371 2499 break;
2500 case TIPC_CMD_SET_LINK_WINDOW:
2501 m_ptr->window = new_value;
2502 break;
2503 default:
2504 res = -EINVAL;
2505 break;
5c216e1d 2506 }
636c0371 2507 return res;
2508}
2509
c4307285 2510struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
4323add6 2511 u16 cmd)
2512{
2513 struct tipc_link_config *args;
c4307285 2514 u32 new_value;
c4307285 2515 int res;
2516
2517 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
4323add6 2518 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2519
2520 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2521 new_value = ntohl(args->value);
2522
2523 if (!link_value_is_valid(cmd, new_value))
2524 return tipc_cfg_reply_error_string(
2525 "cannot change, value invalid");
2526
4323add6 2527 if (!strcmp(args->name, tipc_bclink_name)) {
b97bf3fd 2528 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2529 (tipc_bclink_set_queue_limits(new_value) == 0))
2530 return tipc_cfg_reply_none();
c4307285 2531 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
4323add6 2532 " (cannot change setting on broadcast link)");
2533 }
2534
4323add6 2535 read_lock_bh(&tipc_net_lock);
5c216e1d 2536 res = link_cmd_set_value(args->name, new_value, cmd);
4323add6 2537 read_unlock_bh(&tipc_net_lock);
b97bf3fd 2538 if (res)
c4307285 2539 return tipc_cfg_reply_error_string("cannot change link setting");
b97bf3fd 2540
4323add6 2541 return tipc_cfg_reply_none();
2542}
2543
2544/**
2545 * link_reset_statistics - reset link statistics
2546 * @l_ptr: pointer to link
2547 */
a18c4bc3 2548static void link_reset_statistics(struct tipc_link *l_ptr)
2549{
2550 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2551 l_ptr->stats.sent_info = l_ptr->next_out_no;
2552 l_ptr->stats.recv_info = l_ptr->next_in_no;
2553}
2554
4323add6 2555struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2556{
2557 char *link_name;
a18c4bc3 2558 struct tipc_link *l_ptr;
6c00055a 2559 struct tipc_node *node;
2560
2561 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
4323add6 2562 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2563
2564 link_name = (char *)TLV_DATA(req_tlv_area);
2565 if (!strcmp(link_name, tipc_bclink_name)) {
2566 if (tipc_bclink_reset_stats())
2567 return tipc_cfg_reply_error_string("link not found");
2568 return tipc_cfg_reply_none();
2569 }
2570
4323add6 2571 read_lock_bh(&tipc_net_lock);
c4307285 2572 l_ptr = link_find_link(link_name, &node);
b97bf3fd 2573 if (!l_ptr) {
2574 read_unlock_bh(&tipc_net_lock);
2575 return tipc_cfg_reply_error_string("link not found");
2576 }
2577
4323add6 2578 tipc_node_lock(node);
b97bf3fd 2579 link_reset_statistics(l_ptr);
2580 tipc_node_unlock(node);
2581 read_unlock_bh(&tipc_net_lock);
2582 return tipc_cfg_reply_none();
2583}
2584
2585/**
2586 * percent - convert count to a percentage of total (rounding up or down)
2587 */
2588static u32 percent(u32 count, u32 total)
2589{
2590 return (count * 100 + (total / 2)) / total;
2591}
2592
2593/**
4323add6 2594 * tipc_link_stats - print link statistics
2595 * @name: link name
2596 * @buf: print buffer area
2597 * @buf_size: size of print buffer area
c4307285 2598 *
2599 * Returns length of print buffer data string (or 0 if error)
2600 */
4323add6 2601static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
b97bf3fd 2602{
2603 struct tipc_link *l;
2604 struct tipc_stats *s;
6c00055a 2605 struct tipc_node *node;
2606 char *status;
2607 u32 profile_total = 0;
dc1aed37 2608 int ret;
b97bf3fd 2609
2610 if (!strcmp(name, tipc_bclink_name))
2611 return tipc_bclink_stats(buf, buf_size);
b97bf3fd 2612
4323add6 2613 read_lock_bh(&tipc_net_lock);
2614 l = link_find_link(name, &node);
2615 if (!l) {
4323add6 2616 read_unlock_bh(&tipc_net_lock);
2617 return 0;
2618 }
4323add6 2619 tipc_node_lock(node);
dc1aed37 2620 s = &l->stats;
b97bf3fd 2621
dc1aed37 2622 if (tipc_link_is_active(l))
b97bf3fd 2623 status = "ACTIVE";
dc1aed37 2624 else if (tipc_link_is_up(l))
2625 status = "STANDBY";
2626 else
2627 status = "DEFUNCT";
2628
2629 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
2630 " %s MTU:%u Priority:%u Tolerance:%u ms"
2631 " Window:%u packets\n",
2632 l->name, status, l->max_pkt, l->priority,
2633 l->tolerance, l->queue_limit[0]);
2634
2635 ret += tipc_snprintf(buf + ret, buf_size - ret,
2636 " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2637 l->next_in_no - s->recv_info, s->recv_fragments,
2638 s->recv_fragmented, s->recv_bundles,
2639 s->recv_bundled);
2640
2641 ret += tipc_snprintf(buf + ret, buf_size - ret,
2642 " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2643 l->next_out_no - s->sent_info, s->sent_fragments,
2644 s->sent_fragmented, s->sent_bundles,
2645 s->sent_bundled);
2646
2647 profile_total = s->msg_length_counts;
2648 if (!profile_total)
2649 profile_total = 1;
2650
2651 ret += tipc_snprintf(buf + ret, buf_size - ret,
2652 " TX profile sample:%u packets average:%u octets\n"
2653 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2654 "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2655 s->msg_length_counts,
2656 s->msg_lengths_total / profile_total,
2657 percent(s->msg_length_profile[0], profile_total),
2658 percent(s->msg_length_profile[1], profile_total),
2659 percent(s->msg_length_profile[2], profile_total),
2660 percent(s->msg_length_profile[3], profile_total),
2661 percent(s->msg_length_profile[4], profile_total),
2662 percent(s->msg_length_profile[5], profile_total),
2663 percent(s->msg_length_profile[6], profile_total));
2664
2665 ret += tipc_snprintf(buf + ret, buf_size - ret,
2666 " RX states:%u probes:%u naks:%u defs:%u"
2667 " dups:%u\n", s->recv_states, s->recv_probes,
2668 s->recv_nacks, s->deferred_recv, s->duplicates);
2669
2670 ret += tipc_snprintf(buf + ret, buf_size - ret,
2671 " TX states:%u probes:%u naks:%u acks:%u"
2672 " dups:%u\n", s->sent_states, s->sent_probes,
2673 s->sent_nacks, s->sent_acks, s->retransmitted);
2674
2675 ret += tipc_snprintf(buf + ret, buf_size - ret,
2676 " Congestion link:%u Send queue"
2677 " max:%u avg:%u\n", s->link_congs,
2678 s->max_queue_sz, s->queue_sz_counts ?
2679 (s->accu_queue_sz / s->queue_sz_counts) : 0);
b97bf3fd 2680
2681 tipc_node_unlock(node);
2682 read_unlock_bh(&tipc_net_lock);
dc1aed37 2683 return ret;
2684}
2685
4323add6 2686struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2687{
2688 struct sk_buff *buf;
2689 struct tlv_desc *rep_tlv;
2690 int str_len;
2691 int pb_len;
2692 char *pb;
2693
2694 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
4323add6 2695 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
b97bf3fd 2696
dc1aed37 2697 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
2698 if (!buf)
2699 return NULL;
2700
2701 rep_tlv = (struct tlv_desc *)buf->data;
2702 pb = TLV_DATA(rep_tlv);
2703 pb_len = ULTRA_STRING_MAX_LEN;
4323add6 2704 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
dc1aed37 2705 pb, pb_len);
b97bf3fd 2706 if (!str_len) {
5f6d9123 2707 kfree_skb(buf);
c4307285 2708 return tipc_cfg_reply_error_string("link not found");
b97bf3fd 2709 }
dc1aed37 2710 str_len += 1; /* for "\0" */
2711 skb_put(buf, TLV_SPACE(str_len));
2712 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2713
2714 return buf;
2715}
2716
b97bf3fd 2717/**
4323add6 2718 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
2719 * @dest: network address of destination node
2720 * @selector: used to select from set of active links
c4307285 2721 *
2722 * If no active link can be found, uses default maximum packet size.
2723 */
4323add6 2724u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
b97bf3fd 2725{
6c00055a 2726 struct tipc_node *n_ptr;
a18c4bc3 2727 struct tipc_link *l_ptr;
b97bf3fd 2728 u32 res = MAX_PKT_DEFAULT;
c4307285 2729
2730 if (dest == tipc_own_addr)
2731 return MAX_MSG_SIZE;
2732
c4307285 2733 read_lock_bh(&tipc_net_lock);
51a8e4de 2734 n_ptr = tipc_node_find(dest);
b97bf3fd 2735 if (n_ptr) {
4323add6 2736 tipc_node_lock(n_ptr);
b97bf3fd
PL
2737 l_ptr = n_ptr->active_links[selector & 1];
2738 if (l_ptr)
15e979da 2739 res = l_ptr->max_pkt;
4323add6 2740 tipc_node_unlock(n_ptr);
b97bf3fd 2741 }
c4307285 2742 read_unlock_bh(&tipc_net_lock);
2743 return res;
2744}
2745
a18c4bc3 2746static void link_print(struct tipc_link *l_ptr, const char *str)
b97bf3fd 2747{
5deedde9 2748 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);
8d64a5ba 2749
b97bf3fd 2750 if (link_working_unknown(l_ptr))
5deedde9 2751 pr_cont(":WU\n");
8d64a5ba 2752 else if (link_reset_reset(l_ptr))
5deedde9 2753 pr_cont(":RR\n");
8d64a5ba 2754 else if (link_reset_unknown(l_ptr))
5deedde9 2755 pr_cont(":RU\n");
8d64a5ba 2756 else if (link_working_working(l_ptr))
2757 pr_cont(":WW\n");
2758 else
2759 pr_cont("\n");
b97bf3fd 2760}