/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16
#endif

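/* Round up to TIPC's 4-byte word alignment, e.g. align(5) == 8,
 * align(8) == 8.
 */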
static unsigned int align(unsigned int i)
{
        return (i + 3) & ~3u;
}

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Return: a new buffer with data pointers set to the specified size.
 *
 * NOTE:
 * Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
        struct sk_buff *skb;
#ifdef CONFIG_TIPC_CRYPTO
        unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
#else
        unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
#endif

        skb = alloc_skb_fclone(buf_size, gfp);
        if (skb) {
                skb_reserve(skb, BUF_HEADROOM);
                skb_put(skb, size);
                skb->next = NULL;
        }
        return skb;
}

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
                   u32 hsize, u32 dnode)
{
        memset(m, 0, hsize);
        msg_set_version(m);
        msg_set_user(m, user);
        msg_set_hdr_sz(m, hsize);
        msg_set_size(m, hsize);
        msg_set_prevnode(m, own_node);
        msg_set_type(m, type);
        if (hsize > SHORT_H_SIZE) {
                msg_set_orignode(m, own_node);
                msg_set_destnode(m, dnode);
        }
}

struct sk_buff *tipc_msg_create(uint user, uint type,
                                uint hdr_sz, uint data_sz, u32 dnode,
                                u32 onode, u32 dport, u32 oport, int errcode)
{
        struct tipc_msg *msg;
        struct sk_buff *buf;

        buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
        if (unlikely(!buf))
                return NULL;

        msg = buf_msg(buf);
        tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
        msg_set_size(msg, hdr_sz + data_sz);
        msg_set_origport(msg, oport);
        msg_set_destport(msg, dport);
        msg_set_errcode(msg, errcode);
        return buf;
}
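
/* Illustrative sketch, not part of the upstream file: a header-only
 * control message built in one call. The CONN_MANAGER/CONN_PROBE
 * constants and the address variables are assumptions for the example.
 *
 *      skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
 *                            dnode, onode, dport, oport, TIPC_OK);
 *      if (skb)
 *              __skb_queue_tail(&xmitq, skb);  // hand off to the send path
 */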

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in: the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
        struct sk_buff *head = *headbuf;
        struct sk_buff *frag = *buf;
        struct sk_buff *tail = NULL;
        struct tipc_msg *msg;
        u32 fragid;
        int delta;
        bool headstolen;

        if (!frag)
                goto err;

        msg = buf_msg(frag);
        fragid = msg_type(msg);
        frag->next = NULL;
        skb_pull(frag, msg_hdr_sz(msg));

        if (fragid == FIRST_FRAGMENT) {
                if (unlikely(head))
                        goto err;
                *buf = NULL;
                frag = skb_unshare(frag, GFP_ATOMIC);
                if (unlikely(!frag))
                        goto err;
                head = *headbuf = frag;
                TIPC_SKB_CB(head)->tail = NULL;
                if (skb_is_nonlinear(head)) {
                        skb_walk_frags(head, tail) {
                                TIPC_SKB_CB(head)->tail = tail;
                        }
                } else {
                        skb_frag_list_init(head);
                }
                return 0;
        }

        if (!head)
                goto err;

        if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
                kfree_skb_partial(frag, headstolen);
        } else {
                tail = TIPC_SKB_CB(head)->tail;
                if (!skb_has_frag_list(head))
                        skb_shinfo(head)->frag_list = frag;
                else
                        tail->next = frag;
                head->truesize += frag->truesize;
                head->data_len += frag->len;
                head->len += frag->len;
                TIPC_SKB_CB(head)->tail = frag;
        }

        if (fragid == LAST_FRAGMENT) {
                TIPC_SKB_CB(head)->validated = 0;
                if (unlikely(!tipc_msg_validate(&head)))
                        goto err;
                *buf = head;
                TIPC_SKB_CB(head)->tail = NULL;
                *headbuf = NULL;
                return 1;
        }
        *buf = NULL;
        return 0;
err:
        kfree_skb(*buf);
        kfree_skb(*headbuf);
        *buf = *headbuf = NULL;
        return 0;
}
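
/* Illustrative reassembly loop (a sketch; next_fragment() and deliver()
 * are hypothetical stand-ins for the real receive path). On error both
 * pointers are reset to NULL, so the loop can stop as soon as the head
 * disappears; tipc_msg_reassemble() below uses the same pattern.
 *
 *      struct sk_buff *head = NULL, *frag;
 *
 *      while ((frag = next_fragment()) != NULL) {
 *              if (tipc_buf_append(&head, &frag)) {
 *                      deliver(frag);  // frag now holds the whole message
 *                      break;
 *              }
 *              if (!head)
 *                      break;          // error: partial state was freed
 *      }
 */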

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 *
 * Return: the number of 1k blocks appended or a negative errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
                    int mss, struct sk_buff_head *txq)
{
        struct sk_buff *skb;
        int accounted, total, curr;
        int mlen, cpy, rem = dlen;
        struct tipc_msg *hdr;

        skb = skb_peek_tail(txq);
        accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
        total = accounted;

        do {
                if (!skb || skb->len >= mss) {
                        skb = tipc_buf_acquire(mss, GFP_KERNEL);
                        if (unlikely(!skb))
                                return -ENOMEM;
                        skb_orphan(skb);
                        skb_trim(skb, MIN_H_SIZE);
                        hdr = buf_msg(skb);
                        skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
                        msg_set_hdr_sz(hdr, MIN_H_SIZE);
                        msg_set_size(hdr, MIN_H_SIZE);
                        __skb_queue_tail(txq, skb);
                        total += 1;
                }
                hdr = buf_msg(skb);
                curr = msg_blocks(hdr);
                mlen = msg_size(hdr);
                cpy = min_t(size_t, rem, mss - mlen);
                if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
                        return -EFAULT;
                msg_set_size(hdr, mlen + cpy);
                skb_put(skb, cpy);
                rem -= cpy;
                total += msg_blocks(hdr) - curr;
        } while (rem > 0);
        return total - accounted;
}
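
/* Illustrative sketch, assuming a stream-socket caller: hdr, m, send and
 * maxnagle stand in for the socket's cached header, the user's msghdr and
 * the connection state. The positive return value (new 1k blocks) would
 * feed the sender's flow control accounting.
 *
 *      rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
 *      if (unlikely(rc < 0))
 *              return rc;              // -ENOMEM or -EFAULT
 */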

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
        struct sk_buff *skb = *_skb;
        struct tipc_msg *hdr;
        int msz, hsz;

        /* Ensure that flow control ratio condition is satisfied */
        if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
                skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
                if (!skb)
                        return false;
                kfree_skb(*_skb);
                *_skb = skb;
        }

        if (unlikely(TIPC_SKB_CB(skb)->validated))
                return true;

        if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
                return false;

        hsz = msg_hdr_sz(buf_msg(skb));
        if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
                return false;
        if (unlikely(!pskb_may_pull(skb, hsz)))
                return false;

        hdr = buf_msg(skb);
        if (unlikely(msg_version(hdr) != TIPC_VERSION))
                return false;

        msz = msg_size(hdr);
        if (unlikely(msz < hsz))
                return false;
        if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
                return false;
        if (unlikely(skb->len < msz))
                return false;

        TIPC_SKB_CB(skb)->validated = 1;
        return true;
}
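
/* Illustrative sketch: validation is typically the first step on a receive
 * path. The double pointer matters, since the function may replace the skb
 * with an expanded copy:
 *
 *      if (unlikely(!tipc_msg_validate(&skb)))
 *              goto discard;           // caller still owns and frees skb
 */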

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
                      int pktmax, struct sk_buff_head *frags)
{
        int pktno, nof_fragms, dsz, dmax, eat;
        struct tipc_msg *_hdr;
        struct sk_buff *_skb;
        u8 *data;

        /* Non-linear buffer? */
        if (skb_linearize(skb))
                return -ENOMEM;

        data = (u8 *)skb->data;
        dsz = msg_size(buf_msg(skb));
        dmax = pktmax - INT_H_SIZE;
        if (dsz <= dmax || !dmax)
                return -EINVAL;

        nof_fragms = dsz / dmax + 1;
        for (pktno = 1; pktno <= nof_fragms; pktno++) {
                if (pktno < nof_fragms)
                        eat = dmax;
                else
                        eat = dsz % dmax;
                /* Allocate a new fragment */
                _skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
                if (!_skb)
                        goto error;
                skb_orphan(_skb);
                __skb_queue_tail(frags, _skb);
                /* Copy header & data to the fragment */
                skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
                skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
                data += eat;
                /* Update the fragment's header */
                _hdr = buf_msg(_skb);
                msg_set_fragm_no(_hdr, pktno);
                msg_set_nof_fragms(_hdr, nof_fragms);
                msg_set_size(_hdr, INT_H_SIZE + eat);
        }
        return 0;

error:
        __skb_queue_purge(frags);
        __skb_queue_head_init(frags);
        return -ENOMEM;
}
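
/* Illustrative sketch, assuming a link-layer caller with a prepared
 * fragment header and a known MTU:
 *
 *      struct sk_buff_head frags;
 *
 *      __skb_queue_head_init(&frags);
 *      rc = tipc_msg_fragment(skb, &fraghdr, mtu, &frags);
 *      if (rc)
 *              return rc;      // -EINVAL if it already fits, or -ENOMEM
 */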

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages (FIXME)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Return: message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
                   int dsz, int pktmax, struct sk_buff_head *list)
{
        int mhsz = msg_hdr_sz(mhdr);
        struct tipc_msg pkthdr;
        int msz = mhsz + dsz;
        int pktrem = pktmax;
        struct sk_buff *skb;
        int drem = dsz;
        int pktno = 1;
        char *pktpos;
        int pktsz;
        int rc;

        msg_set_size(mhdr, msz);

        /* No fragmentation needed? */
        if (likely(msz <= pktmax)) {
                skb = tipc_buf_acquire(msz, GFP_KERNEL);

                /* Fall back to smaller MTU if node local message */
                if (unlikely(!skb)) {
                        if (pktmax != MAX_MSG_SIZE)
                                return -ENOMEM;
                        rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
                        if (rc != dsz)
                                return rc;
                        if (tipc_msg_assemble(list))
                                return dsz;
                        return -ENOMEM;
                }
                skb_orphan(skb);
                __skb_queue_tail(list, skb);
                skb_copy_to_linear_data(skb, mhdr, mhsz);
                pktpos = skb->data + mhsz;
                if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
                        return dsz;
                rc = -EFAULT;
                goto error;
        }

        /* Prepare reusable fragment header */
        tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
                      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
        msg_set_size(&pkthdr, pktmax);
        msg_set_fragm_no(&pkthdr, pktno);
        msg_set_importance(&pkthdr, msg_importance(mhdr));

        /* Prepare first fragment */
        skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        skb_orphan(skb);
        __skb_queue_tail(list, skb);
        pktpos = skb->data;
        skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
        pktpos += INT_H_SIZE;
        pktrem -= INT_H_SIZE;
        skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
        pktpos += mhsz;
        pktrem -= mhsz;

        do {
                if (drem < pktrem)
                        pktrem = drem;

                if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
                        rc = -EFAULT;
                        goto error;
                }
                drem -= pktrem;

                if (!drem)
                        break;

                /* Prepare new fragment: */
                if (drem < (pktmax - INT_H_SIZE))
                        pktsz = drem + INT_H_SIZE;
                else
                        pktsz = pktmax;
                skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
                if (!skb) {
                        rc = -ENOMEM;
                        goto error;
                }
                skb_orphan(skb);
                __skb_queue_tail(list, skb);
                msg_set_type(&pkthdr, FRAGMENT);
                msg_set_size(&pkthdr, pktsz);
                msg_set_fragm_no(&pkthdr, ++pktno);
                skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
                pktpos = skb->data + INT_H_SIZE;
                pktrem = pktsz - INT_H_SIZE;

        } while (1);
        msg_set_type(buf_msg(skb), LAST_FRAGMENT);
        return dsz;
error:
        __skb_queue_purge(list);
        __skb_queue_head_init(list);
        return rc;
}
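
/* Illustrative sketch of the send path, with mhdr, m, dlen and mtu assumed
 * to come from the socket layer: on success the list holds either a single
 * buffer or a FIRST_FRAGMENT..LAST_FRAGMENT chain.
 *
 *      struct sk_buff_head pkts;
 *
 *      __skb_queue_head_init(&pkts);
 *      rc = tipc_msg_build(mhdr, m, 0, dlen, mtu, &pkts);
 *      if (unlikely(rc != dlen))
 *              return rc;              // -ENOMEM or -EFAULT
 */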

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Return: "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
                            u32 max)
{
        struct tipc_msg *bmsg = buf_msg(bskb);
        u32 msz, bsz, offset, pad;

        msz = msg_size(msg);
        bsz = msg_size(bmsg);
        offset = align(bsz);
        pad = offset - bsz;

        if (unlikely(skb_tailroom(bskb) < (pad + msz)))
                return false;
        if (unlikely(max < (offset + msz)))
                return false;

        skb_put(bskb, pad + msz);
        skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
        msg_set_size(bmsg, offset + msz);
        msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
        return true;
}

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb has potential for bundling, now or
 * later; if bundling was performed this time, the skb has been consumed and
 * the skb pointer set to NULL. Otherwise "false" if the skb cannot be
 * bundled at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
                         u32 dnode, bool *new_bundle)
{
        struct tipc_msg *msg, *inner, *outer;
        u32 tsz;

        /* First, check if the new buffer is suitable for bundling */
        msg = buf_msg(*skb);
        if (msg_user(msg) == MSG_FRAGMENTER)
                return false;
        if (msg_user(msg) == TUNNEL_PROTOCOL)
                return false;
        if (msg_user(msg) == BCAST_PROTOCOL)
                return false;
        if (mss <= INT_H_SIZE + msg_size(msg))
                return false;

        /* OK, the message is bundleable. Is there a last/target buffer yet? */
        if (unlikely(!tskb))
                return true;

        /* Is it a bundle already? Try to bundle the new message to it */
        if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
                *new_bundle = false;
                goto bundle;
        }

        /* Make a new bundle of the two messages if possible */
        tsz = msg_size(buf_msg(tskb));
        if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
                return true;
        if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
                                      GFP_ATOMIC)))
                return true;
        inner = buf_msg(tskb);
        skb_push(tskb, INT_H_SIZE);
        outer = buf_msg(tskb);
        tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
                      dnode);
        msg_set_importance(outer, msg_importance(inner));
        msg_set_size(outer, INT_H_SIZE + tsz);
        msg_set_msgcnt(outer, 1);
        *new_bundle = true;

bundle:
        if (likely(tipc_msg_bundle(tskb, msg, mss))) {
                consume_skb(*skb);
                *skb = NULL;
        }
        return true;
}
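
/* Illustrative sketch, assuming a link backlog queue 'backlogq' and link
 * state variables mss/dnode: try to piggy-back the new message on the last
 * queued buffer before queueing it as a packet of its own.
 *
 *      tskb = skb_peek_tail(backlogq);
 *      if (tipc_msg_try_bundle(tskb, &skb, mss, dnode, &new_bundle) && !skb)
 *              return;                 // bundled into tskb and consumed
 *      __skb_queue_tail(backlogq, skb); // transmitted separately
 */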

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted; advanced to the
 *       position of the next msg on return
 * Consumes outer buffer when last packet extracted
 * Return: true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
        struct tipc_msg *hdr, *ihdr;
        int imsz;

        *iskb = NULL;
        if (unlikely(skb_linearize(skb)))
                goto none;

        hdr = buf_msg(skb);
        if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
                goto none;

        ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
        imsz = msg_size(ihdr);

        if ((*pos + imsz) > msg_data_sz(hdr))
                goto none;

        *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
        if (!*iskb)
                goto none;

        skb_copy_to_linear_data(*iskb, ihdr, imsz);
        if (unlikely(!tipc_msg_validate(iskb)))
                goto none;

        *pos += align(imsz);
        return true;
none:
        kfree_skb(skb);
        kfree_skb(*iskb);
        *iskb = NULL;
        return false;
}
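
/* Illustrative unbundling loop (deliver() is a hypothetical stand-in):
 * each call copies out one inner message; the final, failing call
 * consumes the outer buffer.
 *
 *      int pos = 0;
 *
 *      while (tipc_msg_extract(skb, &iskb, &pos))
 *              deliver(iskb);
 */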

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Return: true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
        struct sk_buff *_skb = *skb;
        struct tipc_msg *_hdr, *hdr;
        int hlen, dlen;

        if (skb_linearize(_skb))
                goto exit;
        _hdr = buf_msg(_skb);
        dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
        hlen = msg_hdr_sz(_hdr);

        if (msg_dest_droppable(_hdr))
                goto exit;
        if (msg_errcode(_hdr))
                goto exit;

        /* Never return SHORT header */
        if (hlen == SHORT_H_SIZE)
                hlen = BASIC_H_SIZE;

        /* Don't return data along with SYN+, - sender has a clone */
        if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
                dlen = 0;

        /* Allocate new buffer to return */
        *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
        if (!*skb)
                goto exit;
        memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
        memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

        /* Build reverse header in new buffer */
        hdr = buf_msg(*skb);
        msg_set_hdr_sz(hdr, hlen);
        msg_set_errcode(hdr, err);
        msg_set_non_seq(hdr, 0);
        msg_set_origport(hdr, msg_destport(_hdr));
        msg_set_destport(hdr, msg_origport(_hdr));
        msg_set_destnode(hdr, msg_prevnode(_hdr));
        msg_set_prevnode(hdr, own_node);
        msg_set_orignode(hdr, own_node);
        msg_set_size(hdr, hlen + dlen);
        skb_orphan(_skb);
        kfree_skb(_skb);
        return true;
exit:
        kfree_skb(_skb);
        *skb = NULL;
        return false;
}
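
/* For a typical caller see tipc_skb_reject() at the bottom of this file:
 * it reverses a rejected message and queues the result for transmission
 * back to the sender.
 */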

bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
        struct sk_buff *skb, *_skb;

        skb_queue_walk(msg, skb) {
                _skb = skb_clone(skb, GFP_ATOMIC);
                if (!_skb) {
                        __skb_queue_purge(cpy);
                        pr_err_ratelimited("Failed to clone buffer chain\n");
                        return false;
                }
                __skb_queue_tail(cpy, _skb);
        }
        return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Return: true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
        struct tipc_msg *msg = buf_msg(skb);
        u32 dport, dnode;
        u32 onode = tipc_own_addr(net);

        if (!msg_isdata(msg))
                return false;
        if (!msg_named(msg))
                return false;
        if (msg_errcode(msg))
                return false;
        *err = TIPC_ERR_NO_NAME;
        if (skb_linearize(skb))
                return false;
        msg = buf_msg(skb);
        if (msg_reroute_cnt(msg))
                return false;
        dnode = tipc_scope2node(net, msg_lookup_scope(msg));
        dport = tipc_nametbl_translate(net, msg_nametype(msg),
                                       msg_nameinst(msg), &dnode);
        if (!dport)
                return false;
        msg_incr_reroute_cnt(msg);
        if (dnode != onode)
                msg_set_prevnode(msg, onode);
        msg_set_destnode(msg, dnode);
        msg_set_destport(msg, dport);
        *err = TIPC_OK;

        return true;
}
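
/* Illustrative sketch, assuming a receive-path caller: retry the name
 * table lookup once before rejecting the message with the chosen error.
 *
 *      if (tipc_msg_lookup_dest(net, skb, &err))
 *              redeliver(skb);         // hypothetical re-dispatch
 *      else
 *              tipc_skb_reject(net, err, skb, &xmitq);
 */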

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp = NULL;

        if (skb_queue_len(list) == 1)
                return true;

        while ((skb = __skb_dequeue(list))) {
                skb->next = NULL;
                if (tipc_buf_append(&tmp, &skb)) {
                        __skb_queue_tail(list, skb);
                        return true;
                }
                if (!tmp)
                        break;
        }
        __skb_queue_purge(list);
        __skb_queue_head_init(list);
        pr_warn("Failed to assemble buffer\n");
        return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 * reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
        struct sk_buff *skb, *_skb;
        struct sk_buff *frag = NULL;
        struct sk_buff *head = NULL;
        int hdr_len;

        /* Copy header if single buffer */
        if (skb_queue_len(list) == 1) {
                skb = skb_peek(list);
                hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
                _skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
                if (!_skb)
                        return false;
                __skb_queue_tail(rcvq, _skb);
                return true;
        }

        /* Clone all fragments and reassemble */
        skb_queue_walk(list, skb) {
                frag = skb_clone(skb, GFP_ATOMIC);
                if (!frag)
                        goto error;
                frag->next = NULL;
                if (tipc_buf_append(&head, &frag))
                        break;
                if (!head)
                        goto error;
        }
        __skb_queue_tail(rcvq, frag);
        return true;
error:
        pr_warn("Failed to clone local mcast rcv buffer\n");
        kfree_skb(head);
        return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
                        struct sk_buff_head *cpy)
{
        struct sk_buff *skb, *_skb;

        skb_queue_walk(msg, skb) {
                _skb = pskb_copy(skb, GFP_ATOMIC);
                if (!_skb) {
                        __skb_queue_purge(cpy);
                        return false;
                }
                msg_set_destnode(buf_msg(_skb), dst);
                __skb_queue_tail(cpy, _skb);
        }
        return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
                             struct sk_buff *skb)
{
        struct sk_buff *_skb, *tmp;

        if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
                __skb_queue_head(list, skb);
                return true;
        }

        if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
                __skb_queue_tail(list, skb);
                return true;
        }

        skb_queue_walk_safe(list, _skb, tmp) {
                if (more(seqno, buf_seqno(_skb)))
                        continue;
                if (seqno == buf_seqno(_skb))
                        break;
                __skb_queue_before(list, _skb, skb);
                return true;
        }
        kfree_skb(skb);
        return false;
}
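
/* Illustrative sketch, assuming a deferred-delivery queue on the receive
 * path; duplicates are detected and freed by the function itself:
 *
 *      if (!__tipc_skb_queue_sorted(&defq, msg_seqno(buf_msg(skb)), skb))
 *              pr_debug("duplicate packet dropped\n");
 */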

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
                     struct sk_buff_head *xmitq)
{
        if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
                __skb_queue_tail(xmitq, skb);
}