/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
36
37#include "core.h"
b97bf3fd 38#include "link.h"
b97bf3fd 39#include "port.h"
b97bf3fd 40#include "name_distr.h"
b97bf3fd
PL
41#include "discover.h"
42#include "config.h"
b97bf3fd 43
796c75d0
YX
44#include <linux/pkt_sched.h>
45
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
#define  TIMEOUT_EVT     560817u	/* link timer expired */

/*
 * The following two 'message types' is really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u
77
a18c4bc3 78static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
b97bf3fd 79 struct sk_buff *buf);
a18c4bc3 80static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
170b3927
JPM
81static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
82 struct sk_buff **buf);
a18c4bc3 83static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
23dd4cce 84static int link_send_sections_long(struct tipc_port *sender,
b97bf3fd 85 struct iovec const *msg_sect,
9446b87a 86 unsigned int len, u32 destnode);
a18c4bc3
PG
87static void link_state_event(struct tipc_link *l_ptr, u32 event);
88static void link_reset_statistics(struct tipc_link *l_ptr);
89static void link_print(struct tipc_link *l_ptr, const char *str);
a18c4bc3 90static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
c64f7a6a
JM
91static void tipc_link_send_sync(struct tipc_link *l);
92static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
31e3c3f6 93
b97bf3fd 94/*
05790c64 95 * Simple link routines
b97bf3fd 96 */
/*
 * Simple link routines
 */

/* align - round @i up to the next multiple of 4 (TIPC word alignment) */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
101
a18c4bc3 102static void link_init_max_pkt(struct tipc_link *l_ptr)
b97bf3fd
PL
103{
104 u32 max_pkt;
c4307285 105
2d627b92 106 max_pkt = (l_ptr->b_ptr->mtu & ~3);
b97bf3fd
PL
107 if (max_pkt > MAX_MSG_SIZE)
108 max_pkt = MAX_MSG_SIZE;
109
c4307285 110 l_ptr->max_pkt_target = max_pkt;
b97bf3fd
PL
111 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
112 l_ptr->max_pkt = l_ptr->max_pkt_target;
c4307285 113 else
b97bf3fd
PL
114 l_ptr->max_pkt = MAX_PKT_DEFAULT;
115
c4307285 116 l_ptr->max_pkt_probes = 0;
b97bf3fd
PL
117}
118
a18c4bc3 119static u32 link_next_sent(struct tipc_link *l_ptr)
b97bf3fd
PL
120{
121 if (l_ptr->next_out)
f905730c 122 return buf_seqno(l_ptr->next_out);
b97bf3fd
PL
123 return mod(l_ptr->next_out_no);
124}
125
a18c4bc3 126static u32 link_last_sent(struct tipc_link *l_ptr)
b97bf3fd
PL
127{
128 return mod(link_next_sent(l_ptr) - 1);
129}
130
131/*
05790c64 132 * Simple non-static link routines (i.e. referenced outside this file)
b97bf3fd 133 */
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */

/* tipc_link_is_up - return non-zero if the link is in a working state */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}
140
a18c4bc3 141int tipc_link_is_active(struct tipc_link *l_ptr)
b97bf3fd 142{
a02cec21
ED
143 return (l_ptr->owner->active_links[0] == l_ptr) ||
144 (l_ptr->owner->active_links[1] == l_ptr);
b97bf3fd
PL
145}
146
b97bf3fd
PL
147/**
148 * link_timeout - handle expiration of link timer
149 * @l_ptr: pointer to link
c4307285 150 *
4323add6
PL
151 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
152 * with tipc_link_delete(). (There is no risk that the node will be deleted by
153 * another thread because tipc_link_delete() always cancels the link timer before
154 * tipc_node_delete() is called.)
b97bf3fd 155 */
a18c4bc3 156static void link_timeout(struct tipc_link *l_ptr)
b97bf3fd 157{
4323add6 158 tipc_node_lock(l_ptr->owner);
b97bf3fd
PL
159
160 /* update counters used in statistical profiling of send traffic */
b97bf3fd
PL
161 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
162 l_ptr->stats.queue_sz_counts++;
163
b97bf3fd
PL
164 if (l_ptr->first_out) {
165 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
166 u32 length = msg_size(msg);
167
f64f9e71
JP
168 if ((msg_user(msg) == MSG_FRAGMENTER) &&
169 (msg_type(msg) == FIRST_FRAGMENT)) {
b97bf3fd
PL
170 length = msg_size(msg_get_wrapped(msg));
171 }
172 if (length) {
173 l_ptr->stats.msg_lengths_total += length;
174 l_ptr->stats.msg_length_counts++;
175 if (length <= 64)
176 l_ptr->stats.msg_length_profile[0]++;
177 else if (length <= 256)
178 l_ptr->stats.msg_length_profile[1]++;
179 else if (length <= 1024)
180 l_ptr->stats.msg_length_profile[2]++;
181 else if (length <= 4096)
182 l_ptr->stats.msg_length_profile[3]++;
183 else if (length <= 16384)
184 l_ptr->stats.msg_length_profile[4]++;
185 else if (length <= 32768)
186 l_ptr->stats.msg_length_profile[5]++;
187 else
188 l_ptr->stats.msg_length_profile[6]++;
189 }
190 }
191
192 /* do all other link processing performed on a periodic basis */
b97bf3fd
PL
193
194 link_state_event(l_ptr, TIMEOUT_EVT);
195
196 if (l_ptr->next_out)
4323add6 197 tipc_link_push_queue(l_ptr);
b97bf3fd 198
4323add6 199 tipc_node_unlock(l_ptr->owner);
b97bf3fd
PL
200}
201
a18c4bc3 202static void link_set_timer(struct tipc_link *l_ptr, u32 time)
b97bf3fd
PL
203{
204 k_start_timer(&l_ptr->timer, time);
205}
206
207/**
4323add6 208 * tipc_link_create - create a new link
37b9c08a 209 * @n_ptr: pointer to associated node
b97bf3fd 210 * @b_ptr: pointer to associated bearer
b97bf3fd 211 * @media_addr: media address to use when sending messages over link
c4307285 212 *
b97bf3fd
PL
213 * Returns pointer to link.
214 */
a18c4bc3 215struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
37b9c08a 216 struct tipc_bearer *b_ptr,
4323add6 217 const struct tipc_media_addr *media_addr)
b97bf3fd 218{
a18c4bc3 219 struct tipc_link *l_ptr;
b97bf3fd
PL
220 struct tipc_msg *msg;
221 char *if_name;
37b9c08a
AS
222 char addr_string[16];
223 u32 peer = n_ptr->addr;
224
225 if (n_ptr->link_cnt >= 2) {
226 tipc_addr_string_fill(addr_string, n_ptr->addr);
2cf8aa19 227 pr_err("Attempt to establish third link to %s\n", addr_string);
37b9c08a
AS
228 return NULL;
229 }
230
231 if (n_ptr->links[b_ptr->identity]) {
232 tipc_addr_string_fill(addr_string, n_ptr->addr);
2cf8aa19
EH
233 pr_err("Attempt to establish second link on <%s> to %s\n",
234 b_ptr->name, addr_string);
37b9c08a
AS
235 return NULL;
236 }
b97bf3fd 237
0da974f4 238 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
b97bf3fd 239 if (!l_ptr) {
2cf8aa19 240 pr_warn("Link creation failed, no memory\n");
b97bf3fd
PL
241 return NULL;
242 }
b97bf3fd
PL
243
244 l_ptr->addr = peer;
2d627b92 245 if_name = strchr(b_ptr->name, ':') + 1;
062b4c99 246 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
b97bf3fd 247 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
c4307285 248 tipc_node(tipc_own_addr),
b97bf3fd
PL
249 if_name,
250 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
062b4c99 251 /* note: peer i/f name is updated by reset/activate message */
b97bf3fd 252 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
37b9c08a 253 l_ptr->owner = n_ptr;
b97bf3fd 254 l_ptr->checkpoint = 1;
f882cb76 255 l_ptr->peer_session = INVALID_SESSION;
b97bf3fd 256 l_ptr->b_ptr = b_ptr;
5c216e1d 257 link_set_supervision_props(l_ptr, b_ptr->tolerance);
b97bf3fd
PL
258 l_ptr->state = RESET_UNKNOWN;
259
260 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
261 msg = l_ptr->pmsg;
c68ca7b7 262 tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
b97bf3fd 263 msg_set_size(msg, sizeof(l_ptr->proto_msg));
a686e685 264 msg_set_session(msg, (tipc_random & 0xffff));
b97bf3fd
PL
265 msg_set_bearer_id(msg, b_ptr->identity);
266 strcpy((char *)msg_data(msg), if_name);
267
268 l_ptr->priority = b_ptr->priority;
5c216e1d 269 tipc_link_set_queue_limits(l_ptr, b_ptr->window);
b97bf3fd
PL
270
271 link_init_max_pkt(l_ptr);
272
273 l_ptr->next_out_no = 1;
274 INIT_LIST_HEAD(&l_ptr->waiting_ports);
275
276 link_reset_statistics(l_ptr);
277
37b9c08a 278 tipc_node_attach_link(n_ptr, l_ptr);
b97bf3fd 279
170b3927
JPM
280 k_init_timer(&l_ptr->timer, (Handler)link_timeout,
281 (unsigned long)l_ptr);
94571065 282 list_add_tail(&l_ptr->link_list, &b_ptr->links);
581465fa
JPM
283
284 link_state_event(l_ptr, STARTING_EVT);
b97bf3fd 285
b97bf3fd
PL
286 return l_ptr;
287}
288
c4307285 289/**
4323add6 290 * tipc_link_delete - delete a link
b97bf3fd 291 * @l_ptr: pointer to link
c4307285 292 *
4323add6 293 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
b97bf3fd 294 * This routine must not grab the node lock until after link timer cancellation
c4307285 295 * to avoid a potential deadlock situation.
b97bf3fd 296 */
a18c4bc3 297void tipc_link_delete(struct tipc_link *l_ptr)
b97bf3fd
PL
298{
299 if (!l_ptr) {
2cf8aa19 300 pr_err("Attempt to delete non-existent link\n");
b97bf3fd
PL
301 return;
302 }
303
b97bf3fd 304 k_cancel_timer(&l_ptr->timer);
c4307285 305
4323add6
PL
306 tipc_node_lock(l_ptr->owner);
307 tipc_link_reset(l_ptr);
308 tipc_node_detach_link(l_ptr->owner, l_ptr);
581465fa 309 tipc_link_purge_queues(l_ptr);
b97bf3fd 310 list_del_init(&l_ptr->link_list);
4323add6 311 tipc_node_unlock(l_ptr->owner);
b97bf3fd
PL
312 k_term_timer(&l_ptr->timer);
313 kfree(l_ptr);
314}
315
b97bf3fd
PL
316
317/**
c4307285 318 * link_schedule_port - schedule port for deferred sending
b97bf3fd
PL
319 * @l_ptr: pointer to link
320 * @origport: reference to sending port
321 * @sz: amount of data to be sent
c4307285
YH
322 *
323 * Schedules port for renewed sending of messages after link congestion
b97bf3fd
PL
324 * has abated.
325 */
a18c4bc3 326static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
b97bf3fd 327{
23dd4cce 328 struct tipc_port *p_ptr;
b97bf3fd 329
4323add6
PL
330 spin_lock_bh(&tipc_port_list_lock);
331 p_ptr = tipc_port_lock(origport);
b97bf3fd
PL
332 if (p_ptr) {
333 if (!p_ptr->wakeup)
334 goto exit;
335 if (!list_empty(&p_ptr->wait_list))
336 goto exit;
23dd4cce 337 p_ptr->congested = 1;
15e979da 338 p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
b97bf3fd
PL
339 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
340 l_ptr->stats.link_congs++;
341exit:
4323add6 342 tipc_port_unlock(p_ptr);
b97bf3fd 343 }
4323add6 344 spin_unlock_bh(&tipc_port_list_lock);
b97bf3fd
PL
345 return -ELINKCONG;
346}
347
a18c4bc3 348void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
b97bf3fd 349{
23dd4cce
AS
350 struct tipc_port *p_ptr;
351 struct tipc_port *temp_p_ptr;
b97bf3fd
PL
352 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
353
354 if (all)
355 win = 100000;
356 if (win <= 0)
357 return;
4323add6 358 if (!spin_trylock_bh(&tipc_port_list_lock))
b97bf3fd
PL
359 return;
360 if (link_congested(l_ptr))
361 goto exit;
c4307285 362 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
b97bf3fd
PL
363 wait_list) {
364 if (win <= 0)
365 break;
366 list_del_init(&p_ptr->wait_list);
23dd4cce
AS
367 spin_lock_bh(p_ptr->lock);
368 p_ptr->congested = 0;
369 p_ptr->wakeup(p_ptr);
b97bf3fd 370 win -= p_ptr->waiting_pkts;
23dd4cce 371 spin_unlock_bh(p_ptr->lock);
b97bf3fd
PL
372 }
373
374exit:
4323add6 375 spin_unlock_bh(&tipc_port_list_lock);
b97bf3fd
PL
376}
377
c4307285 378/**
b97bf3fd
PL
379 * link_release_outqueue - purge link's outbound message queue
380 * @l_ptr: pointer to link
381 */
a18c4bc3 382static void link_release_outqueue(struct tipc_link *l_ptr)
b97bf3fd 383{
d77b3831 384 kfree_skb_list(l_ptr->first_out);
b97bf3fd
PL
385 l_ptr->first_out = NULL;
386 l_ptr->out_queue_size = 0;
387}
388
389/**
4323add6 390 * tipc_link_reset_fragments - purge link's inbound message fragments queue
b97bf3fd
PL
391 * @l_ptr: pointer to link
392 */
a18c4bc3 393void tipc_link_reset_fragments(struct tipc_link *l_ptr)
b97bf3fd 394{
40ba3cdf
EH
395 kfree_skb(l_ptr->reasm_head);
396 l_ptr->reasm_head = NULL;
397 l_ptr->reasm_tail = NULL;
b97bf3fd
PL
398}
399
c4307285 400/**
581465fa 401 * tipc_link_purge_queues - purge all pkt queues associated with link
b97bf3fd
PL
402 * @l_ptr: pointer to link
403 */
581465fa 404void tipc_link_purge_queues(struct tipc_link *l_ptr)
b97bf3fd 405{
d77b3831
YX
406 kfree_skb_list(l_ptr->oldest_deferred_in);
407 kfree_skb_list(l_ptr->first_out);
4323add6 408 tipc_link_reset_fragments(l_ptr);
5f6d9123 409 kfree_skb(l_ptr->proto_msg_queue);
b97bf3fd
PL
410 l_ptr->proto_msg_queue = NULL;
411}
412
a18c4bc3 413void tipc_link_reset(struct tipc_link *l_ptr)
b97bf3fd 414{
b97bf3fd
PL
415 u32 prev_state = l_ptr->state;
416 u32 checkpoint = l_ptr->next_in_no;
5392d646 417 int was_active_link = tipc_link_is_active(l_ptr);
c4307285 418
a686e685 419 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
b97bf3fd 420
a686e685
AS
421 /* Link is down, accept any session */
422 l_ptr->peer_session = INVALID_SESSION;
b97bf3fd 423
c4307285 424 /* Prepare for max packet size negotiation */
b97bf3fd 425 link_init_max_pkt(l_ptr);
c4307285 426
b97bf3fd 427 l_ptr->state = RESET_UNKNOWN;
b97bf3fd
PL
428
429 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
430 return;
431
4323add6
PL
432 tipc_node_link_down(l_ptr->owner, l_ptr);
433 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
7368ddf1 434
b9d4c339 435 if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
b97bf3fd
PL
436 l_ptr->reset_checkpoint = checkpoint;
437 l_ptr->exp_msg_count = START_CHANGEOVER;
438 }
439
440 /* Clean up all queues: */
b97bf3fd 441 link_release_outqueue(l_ptr);
5f6d9123 442 kfree_skb(l_ptr->proto_msg_queue);
b97bf3fd 443 l_ptr->proto_msg_queue = NULL;
d77b3831 444 kfree_skb_list(l_ptr->oldest_deferred_in);
b97bf3fd 445 if (!list_empty(&l_ptr->waiting_ports))
4323add6 446 tipc_link_wakeup_ports(l_ptr, 1);
b97bf3fd
PL
447
448 l_ptr->retransm_queue_head = 0;
449 l_ptr->retransm_queue_size = 0;
450 l_ptr->last_out = NULL;
451 l_ptr->first_out = NULL;
452 l_ptr->next_out = NULL;
453 l_ptr->unacked_window = 0;
454 l_ptr->checkpoint = 1;
455 l_ptr->next_out_no = 1;
456 l_ptr->deferred_inqueue_sz = 0;
457 l_ptr->oldest_deferred_in = NULL;
458 l_ptr->newest_deferred_in = NULL;
459 l_ptr->fsm_msg_cnt = 0;
460 l_ptr->stale_count = 0;
461 link_reset_statistics(l_ptr);
b97bf3fd
PL
462}
463
464
a18c4bc3 465static void link_activate(struct tipc_link *l_ptr)
b97bf3fd 466{
5392d646 467 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
4323add6
PL
468 tipc_node_link_up(l_ptr->owner, l_ptr);
469 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
b97bf3fd
PL
470}
471
472/**
473 * link_state_event - link finite state machine
474 * @l_ptr: pointer to link
475 * @event: state machine event to process
476 */
95c96174 477static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
b97bf3fd 478{
a18c4bc3 479 struct tipc_link *other;
b97bf3fd
PL
480 u32 cont_intv = l_ptr->continuity_interval;
481
482 if (!l_ptr->started && (event != STARTING_EVT))
483 return; /* Not yet. */
484
77a7e07a
YX
485 /* Check whether changeover is going on */
486 if (l_ptr->exp_msg_count) {
a016892c 487 if (event == TIMEOUT_EVT)
b97bf3fd 488 link_set_timer(l_ptr, cont_intv);
77a7e07a 489 return;
b97bf3fd 490 }
b97bf3fd
PL
491
492 switch (l_ptr->state) {
493 case WORKING_WORKING:
b97bf3fd
PL
494 switch (event) {
495 case TRAFFIC_MSG_EVT:
b97bf3fd 496 case ACTIVATE_MSG:
b97bf3fd
PL
497 break;
498 case TIMEOUT_EVT:
b97bf3fd
PL
499 if (l_ptr->next_in_no != l_ptr->checkpoint) {
500 l_ptr->checkpoint = l_ptr->next_in_no;
4323add6 501 if (tipc_bclink_acks_missing(l_ptr->owner)) {
c4307285 502 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
4323add6 503 0, 0, 0, 0, 0);
b97bf3fd
PL
504 l_ptr->fsm_msg_cnt++;
505 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
c4307285 506 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
4323add6 507 1, 0, 0, 0, 0);
b97bf3fd
PL
508 l_ptr->fsm_msg_cnt++;
509 }
510 link_set_timer(l_ptr, cont_intv);
511 break;
512 }
b97bf3fd
PL
513 l_ptr->state = WORKING_UNKNOWN;
514 l_ptr->fsm_msg_cnt = 0;
4323add6 515 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
b97bf3fd
PL
516 l_ptr->fsm_msg_cnt++;
517 link_set_timer(l_ptr, cont_intv / 4);
518 break;
519 case RESET_MSG:
2cf8aa19
EH
520 pr_info("%s<%s>, requested by peer\n", link_rst_msg,
521 l_ptr->name);
4323add6 522 tipc_link_reset(l_ptr);
b97bf3fd
PL
523 l_ptr->state = RESET_RESET;
524 l_ptr->fsm_msg_cnt = 0;
4323add6 525 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
b97bf3fd
PL
526 l_ptr->fsm_msg_cnt++;
527 link_set_timer(l_ptr, cont_intv);
528 break;
529 default:
2cf8aa19 530 pr_err("%s%u in WW state\n", link_unk_evt, event);
b97bf3fd
PL
531 }
532 break;
533 case WORKING_UNKNOWN:
b97bf3fd
PL
534 switch (event) {
535 case TRAFFIC_MSG_EVT:
b97bf3fd 536 case ACTIVATE_MSG:
b97bf3fd
PL
537 l_ptr->state = WORKING_WORKING;
538 l_ptr->fsm_msg_cnt = 0;
539 link_set_timer(l_ptr, cont_intv);
540 break;
541 case RESET_MSG:
2cf8aa19
EH
542 pr_info("%s<%s>, requested by peer while probing\n",
543 link_rst_msg, l_ptr->name);
4323add6 544 tipc_link_reset(l_ptr);
b97bf3fd
PL
545 l_ptr->state = RESET_RESET;
546 l_ptr->fsm_msg_cnt = 0;
4323add6 547 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
b97bf3fd
PL
548 l_ptr->fsm_msg_cnt++;
549 link_set_timer(l_ptr, cont_intv);
550 break;
551 case TIMEOUT_EVT:
b97bf3fd 552 if (l_ptr->next_in_no != l_ptr->checkpoint) {
b97bf3fd
PL
553 l_ptr->state = WORKING_WORKING;
554 l_ptr->fsm_msg_cnt = 0;
555 l_ptr->checkpoint = l_ptr->next_in_no;
4323add6
PL
556 if (tipc_bclink_acks_missing(l_ptr->owner)) {
557 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
558 0, 0, 0, 0, 0);
b97bf3fd
PL
559 l_ptr->fsm_msg_cnt++;
560 }
561 link_set_timer(l_ptr, cont_intv);
562 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
c4307285 563 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
4323add6 564 1, 0, 0, 0, 0);
b97bf3fd
PL
565 l_ptr->fsm_msg_cnt++;
566 link_set_timer(l_ptr, cont_intv / 4);
567 } else { /* Link has failed */
2cf8aa19
EH
568 pr_warn("%s<%s>, peer not responding\n",
569 link_rst_msg, l_ptr->name);
4323add6 570 tipc_link_reset(l_ptr);
b97bf3fd
PL
571 l_ptr->state = RESET_UNKNOWN;
572 l_ptr->fsm_msg_cnt = 0;
4323add6
PL
573 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
574 0, 0, 0, 0, 0);
b97bf3fd
PL
575 l_ptr->fsm_msg_cnt++;
576 link_set_timer(l_ptr, cont_intv);
577 }
578 break;
579 default:
2cf8aa19 580 pr_err("%s%u in WU state\n", link_unk_evt, event);
b97bf3fd
PL
581 }
582 break;
583 case RESET_UNKNOWN:
b97bf3fd
PL
584 switch (event) {
585 case TRAFFIC_MSG_EVT:
b97bf3fd
PL
586 break;
587 case ACTIVATE_MSG:
588 other = l_ptr->owner->active_links[0];
8d64a5ba 589 if (other && link_working_unknown(other))
b97bf3fd 590 break;
b97bf3fd
PL
591 l_ptr->state = WORKING_WORKING;
592 l_ptr->fsm_msg_cnt = 0;
593 link_activate(l_ptr);
4323add6 594 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
b97bf3fd 595 l_ptr->fsm_msg_cnt++;
c64f7a6a
JM
596 if (l_ptr->owner->working_links == 1)
597 tipc_link_send_sync(l_ptr);
b97bf3fd
PL
598 link_set_timer(l_ptr, cont_intv);
599 break;
600 case RESET_MSG:
b97bf3fd
PL
601 l_ptr->state = RESET_RESET;
602 l_ptr->fsm_msg_cnt = 0;
4323add6 603 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
b97bf3fd
PL
604 l_ptr->fsm_msg_cnt++;
605 link_set_timer(l_ptr, cont_intv);
606 break;
607 case STARTING_EVT:
b97bf3fd
PL
608 l_ptr->started = 1;
609 /* fall through */
610 case TIMEOUT_EVT:
4323add6 611 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
b97bf3fd
PL
612 l_ptr->fsm_msg_cnt++;
613 link_set_timer(l_ptr, cont_intv);
614 break;
615 default:
2cf8aa19 616 pr_err("%s%u in RU state\n", link_unk_evt, event);
b97bf3fd
PL
617 }
618 break;
619 case RESET_RESET:
b97bf3fd
PL
620 switch (event) {
621 case TRAFFIC_MSG_EVT:
b97bf3fd
PL
622 case ACTIVATE_MSG:
623 other = l_ptr->owner->active_links[0];
8d64a5ba 624 if (other && link_working_unknown(other))
b97bf3fd 625 break;
b97bf3fd
PL
626 l_ptr->state = WORKING_WORKING;
627 l_ptr->fsm_msg_cnt = 0;
628 link_activate(l_ptr);
4323add6 629 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
b97bf3fd 630 l_ptr->fsm_msg_cnt++;
c64f7a6a
JM
631 if (l_ptr->owner->working_links == 1)
632 tipc_link_send_sync(l_ptr);
b97bf3fd
PL
633 link_set_timer(l_ptr, cont_intv);
634 break;
635 case RESET_MSG:
b97bf3fd
PL
636 break;
637 case TIMEOUT_EVT:
4323add6 638 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
b97bf3fd
PL
639 l_ptr->fsm_msg_cnt++;
640 link_set_timer(l_ptr, cont_intv);
b97bf3fd
PL
641 break;
642 default:
2cf8aa19 643 pr_err("%s%u in RR state\n", link_unk_evt, event);
b97bf3fd
PL
644 }
645 break;
646 default:
2cf8aa19 647 pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
b97bf3fd
PL
648 }
649}
650
651/*
652 * link_bundle_buf(): Append contents of a buffer to
c4307285 653 * the tail of an existing one.
b97bf3fd 654 */
ae8509c4 655static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
b97bf3fd
PL
656 struct sk_buff *buf)
657{
658 struct tipc_msg *bundler_msg = buf_msg(bundler);
659 struct tipc_msg *msg = buf_msg(buf);
660 u32 size = msg_size(msg);
e49060c7
AS
661 u32 bundle_size = msg_size(bundler_msg);
662 u32 to_pos = align(bundle_size);
663 u32 pad = to_pos - bundle_size;
b97bf3fd
PL
664
665 if (msg_user(bundler_msg) != MSG_BUNDLER)
666 return 0;
667 if (msg_type(bundler_msg) != OPEN_MSG)
668 return 0;
e49060c7 669 if (skb_tailroom(bundler) < (pad + size))
b97bf3fd 670 return 0;
15e979da 671 if (l_ptr->max_pkt < (to_pos + size))
863fae66 672 return 0;
b97bf3fd 673
e49060c7 674 skb_put(bundler, pad + size);
27d7ff46 675 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
b97bf3fd
PL
676 msg_set_size(bundler_msg, to_pos + size);
677 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
5f6d9123 678 kfree_skb(buf);
b97bf3fd
PL
679 l_ptr->stats.sent_bundled++;
680 return 1;
681}
682
a18c4bc3 683static void link_add_to_outqueue(struct tipc_link *l_ptr,
05790c64
SR
684 struct sk_buff *buf,
685 struct tipc_msg *msg)
b97bf3fd
PL
686{
687 u32 ack = mod(l_ptr->next_in_no - 1);
688 u32 seqno = mod(l_ptr->next_out_no++);
689
690 msg_set_word(msg, 2, ((ack << 16) | seqno));
691 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
692 buf->next = NULL;
693 if (l_ptr->first_out) {
694 l_ptr->last_out->next = buf;
695 l_ptr->last_out = buf;
696 } else
697 l_ptr->first_out = l_ptr->last_out = buf;
9bd80b60 698
b97bf3fd 699 l_ptr->out_queue_size++;
9bd80b60
AS
700 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
701 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
b97bf3fd
PL
702}
703
a18c4bc3 704static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
dc63d91e
AS
705 struct sk_buff *buf_chain,
706 u32 long_msgno)
707{
708 struct sk_buff *buf;
709 struct tipc_msg *msg;
710
711 if (!l_ptr->next_out)
712 l_ptr->next_out = buf_chain;
713 while (buf_chain) {
714 buf = buf_chain;
715 buf_chain = buf_chain->next;
716
717 msg = buf_msg(buf);
718 msg_set_long_msgno(msg, long_msgno);
719 link_add_to_outqueue(l_ptr, buf, msg);
720 }
721}
722
c4307285
YH
723/*
724 * tipc_link_send_buf() is the 'full path' for messages, called from
b97bf3fd
PL
725 * inside TIPC when the 'fast path' in tipc_send_buf
726 * has failed, and from link_send()
727 */
a18c4bc3 728int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
b97bf3fd
PL
729{
730 struct tipc_msg *msg = buf_msg(buf);
731 u32 size = msg_size(msg);
732 u32 dsz = msg_data_sz(msg);
733 u32 queue_size = l_ptr->out_queue_size;
c68ca7b7 734 u32 imp = tipc_msg_tot_importance(msg);
b97bf3fd 735 u32 queue_limit = l_ptr->queue_limit[imp];
15e979da 736 u32 max_packet = l_ptr->max_pkt;
b97bf3fd 737
b97bf3fd 738 /* Match msg importance against queue limits: */
b97bf3fd
PL
739 if (unlikely(queue_size >= queue_limit)) {
740 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
bebc55ae 741 link_schedule_port(l_ptr, msg_origport(msg), size);
5f6d9123 742 kfree_skb(buf);
bebc55ae 743 return -ELINKCONG;
b97bf3fd 744 }
5f6d9123 745 kfree_skb(buf);
b97bf3fd 746 if (imp > CONN_MANAGER) {
2cf8aa19
EH
747 pr_warn("%s<%s>, send queue full", link_rst_msg,
748 l_ptr->name);
4323add6 749 tipc_link_reset(l_ptr);
b97bf3fd
PL
750 }
751 return dsz;
752 }
753
754 /* Fragmentation needed ? */
b97bf3fd 755 if (size > max_packet)
31e3c3f6 756 return link_send_long_buf(l_ptr, buf);
b97bf3fd 757
617d3c7a 758 /* Packet can be queued or sent. */
512137ee 759 if (likely(!link_congested(l_ptr))) {
b97bf3fd
PL
760 link_add_to_outqueue(l_ptr, buf, msg);
761
3c294cb3
YX
762 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
763 l_ptr->unacked_window = 0;
b97bf3fd
PL
764 return dsz;
765 }
617d3c7a 766 /* Congestion: can message be bundled ? */
b97bf3fd
PL
767 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
768 (msg_user(msg) != MSG_FRAGMENTER)) {
769
770 /* Try adding message to an existing bundle */
c4307285 771 if (l_ptr->next_out &&
3c294cb3 772 link_bundle_buf(l_ptr, l_ptr->last_out, buf))
b97bf3fd 773 return dsz;
b97bf3fd
PL
774
775 /* Try creating a new bundle */
b97bf3fd 776 if (size <= max_packet * 2 / 3) {
31e3c3f6 777 struct sk_buff *bundler = tipc_buf_acquire(max_packet);
b97bf3fd
PL
778 struct tipc_msg bundler_hdr;
779
780 if (bundler) {
c68ca7b7 781 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
75715217 782 INT_H_SIZE, l_ptr->addr);
27d7ff46
ACM
783 skb_copy_to_linear_data(bundler, &bundler_hdr,
784 INT_H_SIZE);
b97bf3fd
PL
785 skb_trim(bundler, INT_H_SIZE);
786 link_bundle_buf(l_ptr, bundler, buf);
787 buf = bundler;
788 msg = buf_msg(buf);
789 l_ptr->stats.sent_bundles++;
790 }
791 }
792 }
793 if (!l_ptr->next_out)
794 l_ptr->next_out = buf;
795 link_add_to_outqueue(l_ptr, buf, msg);
b97bf3fd
PL
796 return dsz;
797}
798
c4307285
YH
799/*
800 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
b97bf3fd
PL
801 * not been selected yet, and the the owner node is not locked
802 * Called by TIPC internal users, e.g. the name distributor
803 */
4323add6 804int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
b97bf3fd 805{
a18c4bc3 806 struct tipc_link *l_ptr;
6c00055a 807 struct tipc_node *n_ptr;
b97bf3fd
PL
808 int res = -ELINKCONG;
809
4323add6 810 read_lock_bh(&tipc_net_lock);
51a8e4de 811 n_ptr = tipc_node_find(dest);
b97bf3fd 812 if (n_ptr) {
4323add6 813 tipc_node_lock(n_ptr);
b97bf3fd 814 l_ptr = n_ptr->active_links[selector & 1];
a016892c 815 if (l_ptr)
4323add6 816 res = tipc_link_send_buf(l_ptr, buf);
a016892c 817 else
5f6d9123 818 kfree_skb(buf);
4323add6 819 tipc_node_unlock(n_ptr);
b97bf3fd 820 } else {
5f6d9123 821 kfree_skb(buf);
b97bf3fd 822 }
4323add6 823 read_unlock_bh(&tipc_net_lock);
b97bf3fd
PL
824 return res;
825}
826
c64f7a6a
JM
827/*
828 * tipc_link_send_sync - synchronize broadcast link endpoints.
829 *
830 * Give a newly added peer node the sequence number where it should
831 * start receiving and acking broadcast packets.
832 *
833 * Called with node locked
834 */
835static void tipc_link_send_sync(struct tipc_link *l)
836{
837 struct sk_buff *buf;
838 struct tipc_msg *msg;
839
840 buf = tipc_buf_acquire(INT_H_SIZE);
841 if (!buf)
842 return;
843
844 msg = buf_msg(buf);
845 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
846 msg_set_last_bcast(msg, l->owner->bclink.acked);
847 link_add_chain_to_outqueue(l, buf, 0);
848 tipc_link_push_queue(l);
849}
850
851/*
852 * tipc_link_recv_sync - synchronize broadcast link endpoints.
853 * Receive the sequence number where we should start receiving and
854 * acking broadcast packets from a newly added peer node, and open
855 * up for reception of such packets.
856 *
857 * Called with node locked
858 */
859static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
860{
861 struct tipc_msg *msg = buf_msg(buf);
862
863 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
864 n->bclink.recv_permitted = true;
865 kfree_skb(buf);
866}
867
868/*
9aa88c2a
AS
869 * tipc_link_send_names - send name table entries to new neighbor
870 *
871 * Send routine for bulk delivery of name table messages when contact
872 * with a new neighbor occurs. No link congestion checking is performed
873 * because name table messages *must* be delivered. The messages must be
874 * small enough not to require fragmentation.
875 * Called without any locks held.
876 */
9aa88c2a
AS
877void tipc_link_send_names(struct list_head *message_list, u32 dest)
878{
879 struct tipc_node *n_ptr;
a18c4bc3 880 struct tipc_link *l_ptr;
9aa88c2a
AS
881 struct sk_buff *buf;
882 struct sk_buff *temp_buf;
883
884 if (list_empty(message_list))
885 return;
886
887 read_lock_bh(&tipc_net_lock);
888 n_ptr = tipc_node_find(dest);
889 if (n_ptr) {
890 tipc_node_lock(n_ptr);
891 l_ptr = n_ptr->active_links[0];
892 if (l_ptr) {
893 /* convert circular list to linear list */
894 ((struct sk_buff *)message_list->prev)->next = NULL;
895 link_add_chain_to_outqueue(l_ptr,
896 (struct sk_buff *)message_list->next, 0);
897 tipc_link_push_queue(l_ptr);
898 INIT_LIST_HEAD(message_list);
899 }
900 tipc_node_unlock(n_ptr);
901 }
902 read_unlock_bh(&tipc_net_lock);
903
904 /* discard the messages if they couldn't be sent */
9aa88c2a
AS
905 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
906 list_del((struct list_head *)buf);
5f6d9123 907 kfree_skb(buf);
9aa88c2a
AS
908 }
909}
910
c4307285
YH
911/*
912 * link_send_buf_fast: Entry for data messages where the
b97bf3fd
PL
913 * destination link is known and the header is complete,
914 * inclusive total message length. Very time critical.
915 * Link is locked. Returns user data length.
916 */
a18c4bc3 917static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
05790c64 918 u32 *used_max_pkt)
b97bf3fd
PL
919{
920 struct tipc_msg *msg = buf_msg(buf);
921 int res = msg_data_sz(msg);
922
923 if (likely(!link_congested(l_ptr))) {
15e979da 924 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
512137ee
EH
925 link_add_to_outqueue(l_ptr, buf, msg);
926 tipc_bearer_send(l_ptr->b_ptr, buf,
927 &l_ptr->media_addr);
928 l_ptr->unacked_window = 0;
929 return res;
930 }
931 else
15e979da 932 *used_max_pkt = l_ptr->max_pkt;
b97bf3fd 933 }
4323add6 934 return tipc_link_send_buf(l_ptr, buf); /* All other cases */
b97bf3fd
PL
935}
936
c4307285
YH
/*
 * tipc_link_send_sections_fast: Entry for messages where the
 * destination processor is known and the header is complete,
 * except for total message length.
 * Returns user data length or errno.
 */
int tipc_link_send_sections_fast(struct tipc_port *sender,
				 struct iovec const *msg_sect,
				 unsigned int len, u32 destaddr)
{
	struct tipc_msg *hdr = &sender->phdr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct tipc_node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 */
	res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
	/* Exit if build request was invalid */
	if (unlikely(res < 0))
		return res;

	read_lock_bh(&tipc_net_lock);
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			/* buf != NULL means the message fit in one buffer;
			 * a NULL buf with res >= 0 presumably means it was
			 * too large for the max_pkt hint — TODO confirm
			 * against tipc_msg_build()
			 */
			if (likely(buf)) {
				res = link_send_buf_fast(l_ptr, buf,
							 &sender->max_pkt);
exit:
				/* common exit: drop node lock, then net lock */
				tipc_node_unlock(node);
				read_unlock_bh(&tipc_net_lock);
				return res;
			}

			/* Exit if link (or bearer) is congested */
			if (link_congested(l_ptr)) {
				res = link_schedule_port(l_ptr,
							 sender->ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			read_unlock_bh(&tipc_net_lock);


			/* Retry unlocked rebuild if it now fits one packet */
			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;

			return link_send_sections_long(sender, msg_sect, len,
						       destaddr);
		}
		tipc_node_unlock(node);
	}
	read_unlock_bh(&tipc_net_lock);

	/* Couldn't find a link to the destination node */
	if (buf)
		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	if (res >= 0)
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	return res;
}
1013
c4307285
YH
/*
 * link_send_sections_long(): Entry for long messages where the
 * destination node is known and the header is complete,
 * inclusive total message length.
 * Link and bearer congestion status have been checked to be ok,
 * and are ignored if they change.
 *
 * Note that fragments do not use the full link MTU so that they won't have
 * to undergo refragmentation if link changeover causes them to be sent
 * over another link with an additional tunnel header added as prefix.
 * (Refragmentation will still occur if the other link has a smaller MTU.)
 *
 * Returns user data length or errno.
 */
static int link_send_sections_long(struct tipc_port *sender,
				   struct iovec const *msg_sect,
				   unsigned int len, u32 destaddr)
{
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	struct tipc_msg *hdr = &sender->phdr;
	u32 dsz = len;			/* total user data size to deliver */
	u32 max_pkt, fragm_sz, rest;
	struct tipc_msg fragm_hdr;
	struct sk_buff *buf, *buf_chain, *prev;
	u32 fragm_crs, fragm_rest, hsz, sect_rest;
	const unchar __user *sect_crs;	/* cursor into current user iovec */
	int curr_sect;
	u32 fragm_no;
	int res = 0;

again:
	fragm_no = 1;
	max_pkt = sender->max_pkt - INT_H_SIZE;
		/* leave room for tunnel header in case of link changeover */
	fragm_sz = max_pkt - INT_H_SIZE;
		/* leave room for fragmentation header in each fragment */
	rest = dsz;
	fragm_crs = 0;
	fragm_rest = 0;
	sect_rest = 0;
	sect_crs = NULL;
	curr_sect = -1;

	/* Prepare reusable fragment header */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(hdr));
	msg_set_size(&fragm_hdr, max_pkt);
	msg_set_fragm_no(&fragm_hdr, 1);

	/* Prepare header of first fragment: fragment header followed by
	 * a copy of the original message header
	 */
	buf_chain = buf = tipc_buf_acquire(max_pkt);
	if (!buf)
		return -ENOMEM;
	buf->next = NULL;
	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
	hsz = msg_hdr_sz(hdr);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);

	/* Chop up message */
	fragm_crs = INT_H_SIZE + hsz;
	fragm_rest = fragm_sz - hsz;

	do {		/* For all sections */
		u32 sz;

		/* Advance to next iovec section when current one drained */
		if (!sect_rest) {
			sect_rest = msg_sect[++curr_sect].iov_len;
			sect_crs = msg_sect[curr_sect].iov_base;
		}

		/* Copy the smaller of "section remainder" and
		 * "fragment remainder"
		 */
		if (sect_rest < fragm_rest)
			sz = sect_rest;
		else
			sz = fragm_rest;

		if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
			res = -EFAULT;
error:
			/* frees whole chain built so far, including buf */
			kfree_skb_list(buf_chain);
			return res;
		}
		sect_crs += sz;
		sect_rest -= sz;
		fragm_crs += sz;
		fragm_rest -= sz;
		rest -= sz;

		if (!fragm_rest && rest) {

			/* Initiate new fragment: */
			if (rest <= fragm_sz) {
				fragm_sz = rest;
				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
			} else {
				msg_set_type(&fragm_hdr, FRAGMENT);
			}
			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
			prev = buf;
			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
			if (!buf) {
				res = -ENOMEM;
				goto error;
			}

			buf->next = NULL;
			prev->next = buf;
			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
			fragm_crs = INT_H_SIZE;
			fragm_rest = fragm_sz;
		}
	} while (rest > 0);

	/*
	 * Now we have a buffer chain. Select a link and check
	 * that packet size is still OK
	 */
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[sender->ref & 1];
		if (!l_ptr) {
			tipc_node_unlock(node);
			goto reject;
		}
		if (l_ptr->max_pkt < max_pkt) {
			/* Link MTU shrank while we were building; adopt the
			 * new hint, discard the chain, and refragment
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			kfree_skb_list(buf_chain);
			goto again;
		}
	} else {
reject:
		kfree_skb_list(buf_chain);
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	}

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);
	tipc_node_unlock(node);
	return dsz;
}
1162
/*
 * tipc_link_push_packet: Push one unsent packet to the media
 *
 * Tries, in priority order: a pending retransmission, a deferred
 * protocol message, then one deferred data message (window permitting).
 * Returns 0 if a packet was sent, 1 if there was nothing to push.
 */
static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any, */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = buf_seqno(buf);

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->retransm_queue_head = mod(++r_q_head);
		l_ptr->retransm_queue_size = --r_q_size;
		l_ptr->stats.retransmitted++;
		return 0;
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		/* protocol messages are not kept for retransmission */
		kfree_skb(buf);
		l_ptr->proto_msg_queue = NULL;
		return 0;
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = buf_seqno(l_ptr->first_out);

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
			/* close a bundle once it has been transmitted */
			if (msg_user(msg) == MSG_BUNDLER)
				msg_set_type(msg, CLOSED_MSG);
			l_ptr->next_out = buf->next;
			return 0;
		}
	}
	return 1;
}
1229
/*
 * push_queue(): push out the unsent messages of a link where
 * congestion has abated. Node is locked
 */
void tipc_link_push_queue(struct tipc_link *l_ptr)
{
	/* Keep pushing until tipc_link_push_packet() reports nothing left */
	while (!tipc_link_push_packet(l_ptr))
		;
}
1242
d356eeba
AS
1243static void link_reset_all(unsigned long addr)
1244{
6c00055a 1245 struct tipc_node *n_ptr;
d356eeba
AS
1246 char addr_string[16];
1247 u32 i;
1248
1249 read_lock_bh(&tipc_net_lock);
1250 n_ptr = tipc_node_find((u32)addr);
1251 if (!n_ptr) {
1252 read_unlock_bh(&tipc_net_lock);
1253 return; /* node no longer exists */
1254 }
1255
1256 tipc_node_lock(n_ptr);
1257
2cf8aa19
EH
1258 pr_warn("Resetting all links to %s\n",
1259 tipc_addr_string_fill(addr_string, n_ptr->addr));
d356eeba
AS
1260
1261 for (i = 0; i < MAX_BEARERS; i++) {
1262 if (n_ptr->links[i]) {
8d64a5ba 1263 link_print(n_ptr->links[i], "Resetting link\n");
d356eeba
AS
1264 tipc_link_reset(n_ptr->links[i]);
1265 }
1266 }
1267
1268 tipc_node_unlock(n_ptr);
1269 read_unlock_bh(&tipc_net_lock);
1270}
1271
/*
 * link_retransmit_failure - handle repeated retransmit failure on a link
 *
 * For a standard (unicast) link the link is simply reset; for the
 * broadcast link (addr == 0) diagnostic state is dumped and a reset of
 * all links to the offending peer is scheduled via tipc_k_signal().
 */
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		/* node that failed to ack the broadcast packet */
		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		/* deferred reset: cannot reset links while holding the
		 * node lock in this context
		 */
		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);

		tipc_node_unlock(n_ptr);

		l_ptr->stale_count = 0;
	}
}
1313
a18c4bc3 1314void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
4323add6 1315 u32 retransmits)
b97bf3fd
PL
1316{
1317 struct tipc_msg *msg;
1318
d356eeba
AS
1319 if (!buf)
1320 return;
1321
1322 msg = buf_msg(buf);
c4307285 1323
512137ee
EH
1324 /* Detect repeated retransmit failures */
1325 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1326 if (++l_ptr->stale_count > 100) {
1327 link_retransmit_failure(l_ptr, buf);
1328 return;
d356eeba
AS
1329 }
1330 } else {
512137ee
EH
1331 l_ptr->last_retransmitted = msg_seqno(msg);
1332 l_ptr->stale_count = 1;
b97bf3fd 1333 }
d356eeba 1334
ca509101 1335 while (retransmits && (buf != l_ptr->next_out) && buf) {
b97bf3fd
PL
1336 msg = buf_msg(buf);
1337 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
c4307285 1338 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
3c294cb3
YX
1339 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1340 buf = buf->next;
1341 retransmits--;
1342 l_ptr->stats.retransmitted++;
b97bf3fd 1343 }
d356eeba 1344
b97bf3fd
PL
1345 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1346}
1347
c4307285 1348/**
b97bf3fd
PL
1349 * link_insert_deferred_queue - insert deferred messages back into receive chain
1350 */
a18c4bc3 1351static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
b97bf3fd
PL
1352 struct sk_buff *buf)
1353{
1354 u32 seq_no;
1355
1356 if (l_ptr->oldest_deferred_in == NULL)
1357 return buf;
1358
f905730c 1359 seq_no = buf_seqno(l_ptr->oldest_deferred_in);
b97bf3fd
PL
1360 if (seq_no == mod(l_ptr->next_in_no)) {
1361 l_ptr->newest_deferred_in->next = buf;
1362 buf = l_ptr->oldest_deferred_in;
1363 l_ptr->oldest_deferred_in = NULL;
1364 l_ptr->deferred_inqueue_sz = 0;
1365 }
1366 return buf;
1367}
1368
85035568
AS
1369/**
1370 * link_recv_buf_validate - validate basic format of received message
1371 *
1372 * This routine ensures a TIPC message has an acceptable header, and at least
1373 * as much data as the header indicates it should. The routine also ensures
1374 * that the entire message header is stored in the main fragment of the message
1375 * buffer, to simplify future access to message header fields.
1376 *
1377 * Note: Having extra info present in the message header or data areas is OK.
1378 * TIPC will ignore the excess, under the assumption that it is optional info
1379 * introduced by a later release of the protocol.
1380 */
85035568
AS
1381static int link_recv_buf_validate(struct sk_buff *buf)
1382{
1383 static u32 min_data_hdr_size[8] = {
741d9eb7 1384 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
85035568
AS
1385 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1386 };
1387
1388 struct tipc_msg *msg;
1389 u32 tipc_hdr[2];
1390 u32 size;
1391 u32 hdr_size;
1392 u32 min_hdr_size;
1393
1394 if (unlikely(buf->len < MIN_H_SIZE))
1395 return 0;
1396
1397 msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1398 if (msg == NULL)
1399 return 0;
1400
1401 if (unlikely(msg_version(msg) != TIPC_VERSION))
1402 return 0;
1403
1404 size = msg_size(msg);
1405 hdr_size = msg_hdr_sz(msg);
1406 min_hdr_size = msg_isdata(msg) ?
1407 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1408
1409 if (unlikely((hdr_size < min_hdr_size) ||
1410 (size < hdr_size) ||
1411 (buf->len < size) ||
1412 (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1413 return 0;
1414
1415 return pskb_may_pull(buf, hdr_size);
1416}
1417
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @head: pointer to message buffer chain
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	read_lock_bh(&tipc_net_lock);
	while (head) {
		struct tipc_node *n_ptr;
		struct tipc_link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;
		int type;

		/* detach current buffer from the chain before processing */
		head = head->next;
		buf->next = NULL;

		/* Ensure bearer is still enabled */
		if (unlikely(!b_ptr->active))
			goto discard;

		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(buf)))
			goto discard;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(buf)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_recv_msg(buf, b_ptr);
			else
				tipc_bclink_recv_pkt(buf);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock_discard;

		/* Verify that communication with node is currently allowed;
		 * a RESET/ACTIVATE without the redundant-link flag lifts the
		 * WAIT_PEER_DOWN restriction
		 */
		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
			msg_user(msg) == LINK_PROTOCOL &&
			(msg_type(msg) == RESET_MSG ||
			 msg_type(msg) == ACTIVATE_MSG) &&
			!msg_redundant_link(msg))
			n_ptr->block_setup &= ~WAIT_PEER_DOWN;

		if (n_ptr->block_setup)
			goto unlock_discard;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.recv_permitted)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		/* free all sent-and-now-acked buffers from the out queue */
		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(buf_seqno(crs), ackd)) {
			struct sk_buff *next = crs->next;

			kfree_skb(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Now (finally!) process the incoming message */
protocol_check:
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				link_recv_proto_msg(l_ptr, buf);
				head = link_insert_deferred_queue(l_ptr, head);
				tipc_node_unlock(n_ptr);
				continue;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				buf->next = head;
				head = buf;
				tipc_node_unlock(n_ptr);
				continue;
			}
			goto unlock_discard;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		l_ptr->next_in_no++;
		if (unlikely(l_ptr->oldest_deferred_in))
			head = link_insert_deferred_queue(l_ptr, head);
deliver:
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(n_ptr);
			tipc_port_recv_msg(buf);
			continue;
		}
		/* Non-data messages: dispatch on internal user; note each
		 * case releases the node lock itself before delivering
		 */
		switch (msg_user(msg)) {
			int ret;
		case MSG_BUNDLER:
			l_ptr->stats.recv_bundles++;
			l_ptr->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(n_ptr);
			tipc_link_recv_bundle(buf);
			continue;
		case NAME_DISTRIBUTOR:
			n_ptr->bclink.recv_permitted = true;
			tipc_node_unlock(n_ptr);
			tipc_named_recv(buf);
			continue;
		case BCAST_PROTOCOL:
			tipc_link_recv_sync(n_ptr, buf);
			tipc_node_unlock(n_ptr);
			continue;
		case CONN_MANAGER:
			tipc_node_unlock(n_ptr);
			tipc_port_recv_proto_msg(buf);
			continue;
		case MSG_FRAGMENTER:
			l_ptr->stats.recv_fragments++;
			ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
						      &l_ptr->reasm_tail,
						      &buf);
			if (ret == LINK_REASM_COMPLETE) {
				l_ptr->stats.recv_fragmented++;
				/* buf now holds the reassembled message */
				msg = buf_msg(buf);
				goto deliver;
			}
			if (ret == LINK_REASM_ERROR)
				tipc_link_reset(l_ptr);
			tipc_node_unlock(n_ptr);
			continue;
		case CHANGEOVER_PROTOCOL:
			type = msg_type(msg);
			if (tipc_link_tunnel_rcv(&l_ptr, &buf)) {
				/* tunnelled message extracted; re-process it */
				msg = buf_msg(buf);
				seq_no = msg_seqno(msg);
				if (type == ORIGINAL_MSG)
					goto deliver;
				goto protocol_check;
			}
			break;
		default:
			kfree_skb(buf);
			buf = NULL;
			break;
		}
		tipc_node_unlock(n_ptr);
		tipc_net_route_msg(buf);
		continue;
unlock_discard:

		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
}
1625
2c53040f 1626/**
8809b255
AS
1627 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1628 *
1629 * Returns increase in queue length (i.e. 0 or 1)
b97bf3fd 1630 */
8809b255 1631u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
4323add6 1632 struct sk_buff *buf)
b97bf3fd 1633{
8809b255
AS
1634 struct sk_buff *queue_buf;
1635 struct sk_buff **prev;
f905730c 1636 u32 seq_no = buf_seqno(buf);
b97bf3fd
PL
1637
1638 buf->next = NULL;
1639
1640 /* Empty queue ? */
1641 if (*head == NULL) {
1642 *head = *tail = buf;
1643 return 1;
1644 }
1645
1646 /* Last ? */
f905730c 1647 if (less(buf_seqno(*tail), seq_no)) {
b97bf3fd
PL
1648 (*tail)->next = buf;
1649 *tail = buf;
1650 return 1;
1651 }
1652
8809b255
AS
1653 /* Locate insertion point in queue, then insert; discard if duplicate */
1654 prev = head;
1655 queue_buf = *head;
1656 for (;;) {
1657 u32 curr_seqno = buf_seqno(queue_buf);
b97bf3fd 1658
8809b255 1659 if (seq_no == curr_seqno) {
5f6d9123 1660 kfree_skb(buf);
8809b255 1661 return 0;
b97bf3fd 1662 }
8809b255
AS
1663
1664 if (less(seq_no, curr_seqno))
b97bf3fd 1665 break;
b97bf3fd 1666
8809b255
AS
1667 prev = &queue_buf->next;
1668 queue_buf = queue_buf->next;
1669 }
b97bf3fd 1670
8809b255
AS
1671 buf->next = queue_buf;
1672 *prev = buf;
1673 return 1;
b97bf3fd
PL
1674}
1675
/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	/* Protocol messages are not sequenced; process them directly */
	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		link_recv_proto_msg(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
				&l_ptr->newest_deferred_in, buf)) {
		l_ptr->deferred_inqueue_sz++;
		l_ptr->stats.deferred_recv++;
		/* send a gap-reporting STATE message every 16 deferrals */
		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else
		l_ptr->stats.duplicates++;
}
1711
/*
 * Send protocol message to the other endpoint.
 *
 * Builds a RESET, ACTIVATE or STATE message from the link's preformatted
 * protocol message template (l_ptr->pmsg) and sends it directly on the
 * bearer. For STATE messages with 'probe_msg' set, the message size is
 * inflated to probe the path MTU (see max_pkt_target logic below).
 */
void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
			      int probe_msg, u32 gap, u32 tolerance,
			      u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Discard any previous message that was deferred due to congestion */
	if (l_ptr->proto_msg_queue) {
		kfree_skb(l_ptr->proto_msg_queue);
		l_ptr->proto_msg_queue = NULL;
	}

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = buf_seqno(l_ptr->next_out);
		msg_set_next_sent(msg, next_sent);
		/* report gap up to the start of the deferred queue, if any */
		if (l_ptr->oldest_deferred_in) {
			u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			/* MTU discovery: grow probe size halfway towards
			 * max_pkt_target; after 10 unanswered probes, lower
			 * the target to the last probed size
			 */
			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	/* redundant-link flag: peer has another working link to us */
	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;

	/* protocol messages are sent once and not retransmitted */
	tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
	l_ptr->unacked_window = 0;
	kfree_skb(buf);
}
1810
/*
 * link_recv_proto_msg - process an incoming link-level protocol message
 * @l_ptr: link the message arrived on
 * @buf: buffer holding the protocol message; consumed on every path
 *
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules.
 * NOTE(review): callers appear to hold the owner node lock — confirm
 * against the call sites outside this view.
 */
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	/* record unnumbered packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/* Network plane conflict: the node with the lowest address rules */
	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->b_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->block_setup = WAIT_NODE_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' -- RESET also carries the ACTIVATE settings */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		/* Only raise tolerance/priority, never lower them here */
		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		/* Negotiate max packet: never exceed what the peer advertises */
		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_warn("%s<%s>, priority change %u->%u\n",
				link_rst_msg, l_ptr->name, l_ptr->priority,
				msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		/* Compute gap between what we received and what peer sent */
		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		/* A probe larger than our header acknowledges that MTU */
		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, rec_gap, 0, 0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, l_ptr->first_out,
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}
1948
1949
170b3927
JPM
1950/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1951 * a different bearer. Owner node is locked.
b97bf3fd 1952 */
170b3927
JPM
1953static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1954 struct tipc_msg *tunnel_hdr,
1955 struct tipc_msg *msg,
1956 u32 selector)
b97bf3fd 1957{
a18c4bc3 1958 struct tipc_link *tunnel;
b97bf3fd
PL
1959 struct sk_buff *buf;
1960 u32 length = msg_size(msg);
1961
1962 tunnel = l_ptr->owner->active_links[selector & 1];
5392d646 1963 if (!tipc_link_is_up(tunnel)) {
2cf8aa19 1964 pr_warn("%stunnel link no longer available\n", link_co_err);
b97bf3fd 1965 return;
5392d646 1966 }
b97bf3fd 1967 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
31e3c3f6 1968 buf = tipc_buf_acquire(length + INT_H_SIZE);
5392d646 1969 if (!buf) {
2cf8aa19 1970 pr_warn("%sunable to send tunnel msg\n", link_co_err);
b97bf3fd 1971 return;
5392d646 1972 }
27d7ff46
ACM
1973 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
1974 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
4323add6 1975 tipc_link_send_buf(tunnel, buf);
b97bf3fd
PL
1976}
1977
1978
170b3927
JPM
/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	u32 msgcount = l_ptr->out_queue_size;
	struct sk_buff *crs = l_ptr->first_out;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	int split_bundles;

	if (!tunnel)
		return;

	/* Reusable ORIGINAL_MSG header; msgcnt tells the peer how many
	 * tunnelled packets to expect.
	 */
	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	/* Empty send queue: still announce the changeover with msgcnt == 0 */
	if (!l_ptr->first_out) {
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			tipc_link_send_buf(tunnel, buf);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	/* Bundles must be split when the two links differ, so each inner
	 * message can be routed by its own link selector.
	 */
	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			/* Tunnel each bundled message individually */
			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
		crs = crs->next;
	}
}
2041
170b3927
JPM
/* tipc_link_dup_send_queue(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
			      struct tipc_link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	/* Reusable DUPLICATE_MSG header for the whole queue */
	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		/* Bundles are frozen so no more messages get appended */
		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		tipc_link_send_buf(tunnel, outbuf);
		/* Abort if the original link went down while we were busy */
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}
2087
b97bf3fd
PL
2088/**
2089 * buf_extract - extracts embedded TIPC message from another message
2090 * @skb: encapsulating message buffer
2091 * @from_pos: offset to extract from
2092 *
c4307285 2093 * Returns a new message buffer containing an embedded message. The
b97bf3fd
PL
2094 * encapsulating message itself is left unchanged.
2095 */
b97bf3fd
PL
2096static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2097{
2098 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2099 u32 size = msg_size(msg);
2100 struct sk_buff *eb;
2101
31e3c3f6 2102 eb = tipc_buf_acquire(size);
b97bf3fd 2103 if (eb)
27d7ff46 2104 skb_copy_to_linear_data(eb, msg, size);
b97bf3fd
PL
2105 return eb;
2106}
2107
170b3927
JPM
/* tipc_link_tunnel_rcv(): Receive a tunneled packet, sent
 * via other link as result of a failover (ORIGINAL_MSG) or
 * a new active link (DUPLICATE_MSG). Failover packets are
 * returned to the active link for delivery upwards.
 * Owner node is locked.
 *
 * On success (*buf holds an extracted message, *l_ptr points to the
 * destination link) returns 1; otherwise frees the tunnel buffer,
 * sets *buf to NULL and returns 0.
 */
static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
				struct sk_buff **buf)
{
	struct sk_buff *tunnel_buf = *buf;
	struct tipc_link *dest_link;
	struct tipc_msg *msg;
	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
	u32 msg_typ = msg_type(tunnel_msg);
	u32 msg_count = msg_msgcnt(tunnel_msg);
	u32 bearer_id = msg_bearer_id(tunnel_msg);

	/* Validate the bearer id before using it as an array index */
	if (bearer_id >= MAX_BEARERS)
		goto exit;
	dest_link = (*l_ptr)->owner->links[bearer_id];
	if (!dest_link)
		goto exit;
	if (dest_link == *l_ptr) {
		pr_err("Unexpected changeover message on link <%s>\n",
		       (*l_ptr)->name);
		goto exit;
	}
	/* Redirect caller to the link the tunnelled packet belongs to */
	*l_ptr = dest_link;
	msg = msg_get_wrapped(tunnel_msg);

	if (msg_typ == DUPLICATE_MSG) {
		/* Drop duplicates already delivered via the original link */
		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
			goto exit;
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf == NULL) {
			pr_warn("%sduplicate msg dropped\n", link_co_err);
			goto exit;
		}
		kfree_skb(tunnel_buf);
		return 1;
	}

	/* First original message ?: */
	if (tipc_link_is_up(dest_link)) {
		pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
			dest_link->name);
		tipc_link_reset(dest_link);
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	}

	/* Receive original message */
	if (dest_link->exp_msg_count == 0) {
		pr_warn("%sgot too many tunnelled messages\n", link_co_err);
		goto exit;
	}
	dest_link->exp_msg_count--;
	/* Discard messages already accepted before the link was reset */
	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
		goto exit;
	} else {
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf != NULL) {
			kfree_skb(tunnel_buf);
			return 1;
		} else {
			pr_warn("%soriginal msg dropped\n", link_co_err);
		}
	}
exit:
	*buf = NULL;
	kfree_skb(tunnel_buf);
	return 0;
}
2186
2187/*
2188 * Bundler functionality:
2189 */
4323add6 2190void tipc_link_recv_bundle(struct sk_buff *buf)
b97bf3fd
PL
2191{
2192 u32 msgcount = msg_msgcnt(buf_msg(buf));
2193 u32 pos = INT_H_SIZE;
2194 struct sk_buff *obuf;
2195
b97bf3fd
PL
2196 while (msgcount--) {
2197 obuf = buf_extract(buf, pos);
2198 if (obuf == NULL) {
2cf8aa19 2199 pr_warn("Link unable to unbundle message(s)\n");
a10bd924 2200 break;
3ff50b79 2201 }
b97bf3fd 2202 pos += align(msg_size(buf_msg(obuf)));
4323add6 2203 tipc_net_route_msg(obuf);
b97bf3fd 2204 }
5f6d9123 2205 kfree_skb(buf);
b97bf3fd
PL
2206}
2207
2208/*
2209 * Fragmentation/defragmentation:
2210 */
2211
c4307285 2212/*
31e3c3f6 2213 * link_send_long_buf: Entry for buffers needing fragmentation.
c4307285 2214 * The buffer is complete, inclusive total message length.
b97bf3fd
PL
2215 * Returns user data length.
2216 */
a18c4bc3 2217static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
b97bf3fd 2218{
77561557
AS
2219 struct sk_buff *buf_chain = NULL;
2220 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
b97bf3fd
PL
2221 struct tipc_msg *inmsg = buf_msg(buf);
2222 struct tipc_msg fragm_hdr;
2223 u32 insize = msg_size(inmsg);
2224 u32 dsz = msg_data_sz(inmsg);
2225 unchar *crs = buf->data;
2226 u32 rest = insize;
15e979da 2227 u32 pack_sz = l_ptr->max_pkt;
b97bf3fd 2228 u32 fragm_sz = pack_sz - INT_H_SIZE;
77561557 2229 u32 fragm_no = 0;
9c396a7b 2230 u32 destaddr;
b97bf3fd
PL
2231
2232 if (msg_short(inmsg))
2233 destaddr = l_ptr->addr;
9c396a7b
AS
2234 else
2235 destaddr = msg_destnode(inmsg);
b97bf3fd 2236
b97bf3fd 2237 /* Prepare reusable fragment header: */
c68ca7b7 2238 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
75715217 2239 INT_H_SIZE, destaddr);
b97bf3fd
PL
2240
2241 /* Chop up message: */
b97bf3fd
PL
2242 while (rest > 0) {
2243 struct sk_buff *fragm;
2244
2245 if (rest <= fragm_sz) {
2246 fragm_sz = rest;
2247 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2248 }
31e3c3f6 2249 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
b97bf3fd 2250 if (fragm == NULL) {
5f6d9123 2251 kfree_skb(buf);
d77b3831 2252 kfree_skb_list(buf_chain);
77561557 2253 return -ENOMEM;
b97bf3fd
PL
2254 }
2255 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
77561557
AS
2256 fragm_no++;
2257 msg_set_fragm_no(&fragm_hdr, fragm_no);
27d7ff46
ACM
2258 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2259 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2260 fragm_sz);
77561557
AS
2261 buf_chain_tail->next = fragm;
2262 buf_chain_tail = fragm;
b97bf3fd 2263
b97bf3fd
PL
2264 rest -= fragm_sz;
2265 crs += fragm_sz;
2266 msg_set_type(&fragm_hdr, FRAGMENT);
2267 }
5f6d9123 2268 kfree_skb(buf);
77561557
AS
2269
2270 /* Append chain of fragments to send queue & send them */
77561557
AS
2271 l_ptr->long_msg_seq_no++;
2272 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2273 l_ptr->stats.sent_fragments += fragm_no;
2274 l_ptr->stats.sent_fragmented++;
2275 tipc_link_push_queue(l_ptr);
2276
b97bf3fd
PL
2277 return dsz;
2278}
2279
c4307285
YH
2280/*
2281 * tipc_link_recv_fragment(): Called with node lock on. Returns
b97bf3fd
PL
2282 * the reassembled buffer if message is complete.
2283 */
40ba3cdf
EH
2284int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
2285 struct sk_buff **fbuf)
b97bf3fd 2286{
40ba3cdf
EH
2287 struct sk_buff *frag = *fbuf;
2288 struct tipc_msg *msg = buf_msg(frag);
2289 u32 fragid = msg_type(msg);
2290 bool headstolen;
2291 int delta;
2292
2293 skb_pull(frag, msg_hdr_sz(msg));
2294 if (fragid == FIRST_FRAGMENT) {
2295 if (*head || skb_unclone(frag, GFP_ATOMIC))
2296 goto out_free;
2297 *head = frag;
2298 skb_frag_list_init(*head);
b97bf3fd 2299 return 0;
3db0a197
EH
2300 } else if (*head &&
2301 skb_try_coalesce(*head, frag, &headstolen, &delta)) {
40ba3cdf
EH
2302 kfree_skb_partial(frag, headstolen);
2303 } else {
2304 if (!*head)
2305 goto out_free;
2306 if (!skb_has_frag_list(*head))
2307 skb_shinfo(*head)->frag_list = frag;
2308 else
2309 (*tail)->next = frag;
2310 *tail = frag;
2311 (*head)->truesize += frag->truesize;
2312 }
2313 if (fragid == LAST_FRAGMENT) {
2314 *fbuf = *head;
2315 *tail = *head = NULL;
2316 return LINK_REASM_COMPLETE;
b97bf3fd 2317 }
b97bf3fd 2318 return 0;
40ba3cdf
EH
2319out_free:
2320 pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
2321 kfree_skb(*fbuf);
2322 return LINK_REASM_ERROR;
b97bf3fd
PL
2323}
2324
a18c4bc3 2325static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
b97bf3fd 2326{
5413b4c6
AS
2327 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2328 return;
2329
b97bf3fd
PL
2330 l_ptr->tolerance = tolerance;
2331 l_ptr->continuity_interval =
2332 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2333 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2334}
2335
a18c4bc3 2336void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
b97bf3fd
PL
2337{
2338 /* Data messages from this node, inclusive FIRST_FRAGM */
06d82c91
AS
2339 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2340 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2341 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2342 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
b97bf3fd 2343 /* Transiting data messages,inclusive FIRST_FRAGM */
06d82c91
AS
2344 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2345 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2346 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2347 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
b97bf3fd 2348 l_ptr->queue_limit[CONN_MANAGER] = 1200;
b97bf3fd
PL
2349 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2350 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2351 /* FRAGMENT and LAST_FRAGMENT packets */
2352 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2353}
2354
2355/**
2356 * link_find_link - locate link by name
2c53040f
BH
2357 * @name: ptr to link name string
2358 * @node: ptr to area to be filled with ptr to associated node
c4307285 2359 *
4323add6 2360 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
b97bf3fd 2361 * this also prevents link deletion.
c4307285 2362 *
b97bf3fd
PL
2363 * Returns pointer to link (or 0 if invalid link name).
2364 */
a18c4bc3
PG
2365static struct tipc_link *link_find_link(const char *name,
2366 struct tipc_node **node)
b97bf3fd 2367{
a18c4bc3 2368 struct tipc_link *l_ptr;
bbfbe47c
EH
2369 struct tipc_node *n_ptr;
2370 int i;
b97bf3fd 2371
bbfbe47c
EH
2372 list_for_each_entry(n_ptr, &tipc_node_list, list) {
2373 for (i = 0; i < MAX_BEARERS; i++) {
2374 l_ptr = n_ptr->links[i];
2375 if (l_ptr && !strcmp(l_ptr->name, name))
2376 goto found;
2377 }
2378 }
2379 l_ptr = NULL;
2380 n_ptr = NULL;
2381found:
2382 *node = n_ptr;
b97bf3fd
PL
2383 return l_ptr;
2384}
2385
5c216e1d
AS
2386/**
2387 * link_value_is_valid -- validate proposed link tolerance/priority/window
2388 *
2c53040f
BH
2389 * @cmd: value type (TIPC_CMD_SET_LINK_*)
2390 * @new_value: the new value
5c216e1d
AS
2391 *
2392 * Returns 1 if value is within range, 0 if not.
2393 */
5c216e1d
AS
2394static int link_value_is_valid(u16 cmd, u32 new_value)
2395{
2396 switch (cmd) {
2397 case TIPC_CMD_SET_LINK_TOL:
2398 return (new_value >= TIPC_MIN_LINK_TOL) &&
2399 (new_value <= TIPC_MAX_LINK_TOL);
2400 case TIPC_CMD_SET_LINK_PRI:
2401 return (new_value <= TIPC_MAX_LINK_PRI);
2402 case TIPC_CMD_SET_LINK_WINDOW:
2403 return (new_value >= TIPC_MIN_LINK_WIN) &&
2404 (new_value <= TIPC_MAX_LINK_WIN);
2405 }
2406 return 0;
2407}
2408
/**
 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
 * @name: ptr to link, bearer, or media name
 * @new_value: new value of link, bearer, or media setting
 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
 *
 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
 *
 * The name is tried as a link first, then a bearer, then a media.
 * Returns 0 if value updated and negative value on error.
 */
static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
{
	struct tipc_node *node;
	struct tipc_link *l_ptr;
	struct tipc_bearer *b_ptr;
	struct tipc_media *m_ptr;
	int res = 0;

	l_ptr = link_find_link(name, &node);
	if (l_ptr) {
		/*
		 * acquire node lock for tipc_link_send_proto_msg().
		 * see "TIPC locking policy" in net.c.
		 */
		tipc_node_lock(node);
		switch (cmd) {
		case TIPC_CMD_SET_LINK_TOL:
			link_set_supervision_props(l_ptr, new_value);
			/* Propagate the new setting to the peer */
			tipc_link_send_proto_msg(l_ptr,
				STATE_MSG, 0, 0, new_value, 0, 0);
			break;
		case TIPC_CMD_SET_LINK_PRI:
			l_ptr->priority = new_value;
			tipc_link_send_proto_msg(l_ptr,
				STATE_MSG, 0, 0, 0, new_value, 0);
			break;
		case TIPC_CMD_SET_LINK_WINDOW:
			tipc_link_set_queue_limits(l_ptr, new_value);
			break;
		default:
			res = -EINVAL;
			break;
		}
		tipc_node_unlock(node);
		return res;
	}

	/* Not a link name: try the bearers next */
	b_ptr = tipc_bearer_find(name);
	if (b_ptr) {
		switch (cmd) {
		case TIPC_CMD_SET_LINK_TOL:
			b_ptr->tolerance = new_value;
			break;
		case TIPC_CMD_SET_LINK_PRI:
			b_ptr->priority = new_value;
			break;
		case TIPC_CMD_SET_LINK_WINDOW:
			b_ptr->window = new_value;
			break;
		default:
			res = -EINVAL;
			break;
		}
		return res;
	}

	/* Finally, try the media types */
	m_ptr = tipc_media_find(name);
	if (!m_ptr)
		return -ENODEV;
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		m_ptr->tolerance = new_value;
		break;
	case TIPC_CMD_SET_LINK_PRI:
		m_ptr->priority = new_value;
		break;
	case TIPC_CMD_SET_LINK_WINDOW:
		m_ptr->window = new_value;
		break;
	default:
		res = -EINVAL;
		break;
	}
	return res;
}
2494
/*
 * tipc_link_cmd_config - netlink/config entry to change a link setting
 * @req_tlv_area: TLV-encoded request (TIPC_TLV_LINK_CONFIG)
 * @req_tlv_space: size of the request area
 * @cmd: TIPC_CMD_SET_LINK_* selector
 *
 * Returns a reply buffer (error string, or "none" on success).
 */
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
				     u16 cmd)
{
	struct tipc_link_config *args;
	u32 new_value;
	int res;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
	new_value = ntohl(args->value);

	/* Range-check before touching anything */
	if (!link_value_is_valid(cmd, new_value))
		return tipc_cfg_reply_error_string(
			"cannot change, value invalid");

	/* Broadcast link only supports the window setting */
	if (!strcmp(args->name, tipc_bclink_name)) {
		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
		    (tipc_bclink_set_queue_limits(new_value) == 0))
			return tipc_cfg_reply_none();
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
			" (cannot change setting on broadcast link)");
	}

	read_lock_bh(&tipc_net_lock);
	res = link_cmd_set_value(args->name, new_value, cmd);
	read_unlock_bh(&tipc_net_lock);
	if (res)
		return tipc_cfg_reply_error_string("cannot change link setting");

	return tipc_cfg_reply_none();
}
2528
2529/**
2530 * link_reset_statistics - reset link statistics
2531 * @l_ptr: pointer to link
2532 */
a18c4bc3 2533static void link_reset_statistics(struct tipc_link *l_ptr)
b97bf3fd
PL
2534{
2535 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2536 l_ptr->stats.sent_info = l_ptr->next_out_no;
2537 l_ptr->stats.recv_info = l_ptr->next_in_no;
2538}
2539
4323add6 2540struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
b97bf3fd
PL
2541{
2542 char *link_name;
a18c4bc3 2543 struct tipc_link *l_ptr;
6c00055a 2544 struct tipc_node *node;
b97bf3fd
PL
2545
2546 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
4323add6 2547 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
b97bf3fd
PL
2548
2549 link_name = (char *)TLV_DATA(req_tlv_area);
4323add6
PL
2550 if (!strcmp(link_name, tipc_bclink_name)) {
2551 if (tipc_bclink_reset_stats())
2552 return tipc_cfg_reply_error_string("link not found");
2553 return tipc_cfg_reply_none();
b97bf3fd
PL
2554 }
2555
4323add6 2556 read_lock_bh(&tipc_net_lock);
c4307285 2557 l_ptr = link_find_link(link_name, &node);
b97bf3fd 2558 if (!l_ptr) {
4323add6
PL
2559 read_unlock_bh(&tipc_net_lock);
2560 return tipc_cfg_reply_error_string("link not found");
b97bf3fd
PL
2561 }
2562
4323add6 2563 tipc_node_lock(node);
b97bf3fd 2564 link_reset_statistics(l_ptr);
4323add6
PL
2565 tipc_node_unlock(node);
2566 read_unlock_bh(&tipc_net_lock);
2567 return tipc_cfg_reply_none();
b97bf3fd
PL
2568}
2569
2570/**
2571 * percent - convert count to a percentage of total (rounding up or down)
2572 */
b97bf3fd
PL
2573static u32 percent(u32 count, u32 total)
2574{
2575 return (count * 100 + (total / 2)) / total;
2576}
2577
/**
 * tipc_link_stats - print link statistics
 * @name: link name
 * @buf: print buffer area
 * @buf_size: size of print buffer area
 *
 * Returns length of print buffer data string (or 0 if error)
 */
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct tipc_link *l;
	struct tipc_stats *s;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
	int ret;

	/* The broadcast link keeps its own statistics */
	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	read_lock_bh(&tipc_net_lock);
	l = link_find_link(name, &node);
	if (!l) {
		read_unlock_bh(&tipc_net_lock);
		return 0;
	}
	tipc_node_lock(node);
	s = &l->stats;

	if (tipc_link_is_active(l))
		status = "ACTIVE";
	else if (tipc_link_is_up(l))
		status = "STANDBY";
	else
		status = "DEFUNCT";

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    " %s MTU:%u Priority:%u Tolerance:%u ms"
			    " Window:%u packets\n",
			    l->name, status, l->max_pkt, l->priority,
			    l->tolerance, l->queue_limit[0]);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_in_no - s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_out_no - s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);

	/* Avoid dividing by zero in the profile averages below */
	profile_total = s->msg_length_counts;
	if (!profile_total)
		profile_total = 1;

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX profile sample:%u packets average:%u octets\n"
			     " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			     "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			     s->msg_length_counts,
			     s->msg_lengths_total / profile_total,
			     percent(s->msg_length_profile[0], profile_total),
			     percent(s->msg_length_profile[1], profile_total),
			     percent(s->msg_length_profile[2], profile_total),
			     percent(s->msg_length_profile[3], profile_total),
			     percent(s->msg_length_profile[4], profile_total),
			     percent(s->msg_length_profile[5], profile_total),
			     percent(s->msg_length_profile[6], profile_total));

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX states:%u probes:%u naks:%u defs:%u"
			     " dups:%u\n", s->recv_states, s->recv_probes,
			     s->recv_nacks, s->deferred_recv, s->duplicates);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX states:%u probes:%u naks:%u acks:%u"
			     " dups:%u\n", s->sent_states, s->sent_probes,
			     s->sent_nacks, s->sent_acks, s->retransmitted);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " Congestion link:%u Send queue"
			     " max:%u avg:%u\n", s->link_congs,
			     s->max_queue_sz, s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return ret;
}
2670
4323add6 2671struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
b97bf3fd
PL
2672{
2673 struct sk_buff *buf;
2674 struct tlv_desc *rep_tlv;
2675 int str_len;
dc1aed37
EH
2676 int pb_len;
2677 char *pb;
b97bf3fd
PL
2678
2679 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
4323add6 2680 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
b97bf3fd 2681
dc1aed37 2682 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
b97bf3fd
PL
2683 if (!buf)
2684 return NULL;
2685
2686 rep_tlv = (struct tlv_desc *)buf->data;
dc1aed37
EH
2687 pb = TLV_DATA(rep_tlv);
2688 pb_len = ULTRA_STRING_MAX_LEN;
4323add6 2689 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
dc1aed37 2690 pb, pb_len);
b97bf3fd 2691 if (!str_len) {
5f6d9123 2692 kfree_skb(buf);
c4307285 2693 return tipc_cfg_reply_error_string("link not found");
b97bf3fd 2694 }
dc1aed37 2695 str_len += 1; /* for "\0" */
b97bf3fd
PL
2696 skb_put(buf, TLV_SPACE(str_len));
2697 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2698
2699 return buf;
2700}
2701
b97bf3fd 2702/**
4323add6 2703 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
b97bf3fd
PL
2704 * @dest: network address of destination node
2705 * @selector: used to select from set of active links
c4307285 2706 *
b97bf3fd
PL
2707 * If no active link can be found, uses default maximum packet size.
2708 */
4323add6 2709u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
b97bf3fd 2710{
6c00055a 2711 struct tipc_node *n_ptr;
a18c4bc3 2712 struct tipc_link *l_ptr;
b97bf3fd 2713 u32 res = MAX_PKT_DEFAULT;
c4307285 2714
b97bf3fd
PL
2715 if (dest == tipc_own_addr)
2716 return MAX_MSG_SIZE;
2717
c4307285 2718 read_lock_bh(&tipc_net_lock);
51a8e4de 2719 n_ptr = tipc_node_find(dest);
b97bf3fd 2720 if (n_ptr) {
4323add6 2721 tipc_node_lock(n_ptr);
b97bf3fd
PL
2722 l_ptr = n_ptr->active_links[selector & 1];
2723 if (l_ptr)
15e979da 2724 res = l_ptr->max_pkt;
4323add6 2725 tipc_node_unlock(n_ptr);
b97bf3fd 2726 }
c4307285 2727 read_unlock_bh(&tipc_net_lock);
b97bf3fd
PL
2728 return res;
2729}
2730
a18c4bc3 2731static void link_print(struct tipc_link *l_ptr, const char *str)
b97bf3fd 2732{
5deedde9 2733 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);
8d64a5ba 2734
b97bf3fd 2735 if (link_working_unknown(l_ptr))
5deedde9 2736 pr_cont(":WU\n");
8d64a5ba 2737 else if (link_reset_reset(l_ptr))
5deedde9 2738 pr_cont(":RR\n");
8d64a5ba 2739 else if (link_reset_unknown(l_ptr))
5deedde9 2740 pr_cont(":RU\n");
8d64a5ba 2741 else if (link_working_working(l_ptr))
5deedde9
PG
2742 pr_cont(":WW\n");
2743 else
2744 pr_cont("\n");
b97bf3fd 2745}