/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "msg.h"

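/* Round a message size up to the next 4-byte boundary */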
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

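/**
 * tipc_msg_init(): Initialize a message header with default field values
 * @m: header area to initialize (zeroed over @hsize bytes)
 * @user: message user
 * @type: message type
 * @hsize: header size, also used as the initial message size
 * @destnode: destination node address
 */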
void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
		   u32 destnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, tipc_own_addr);
	msg_set_type(m, type);
	msg_set_orignode(m, tipc_own_addr);
	msg_set_destnode(m, destnode);
}

/**
 * tipc_msg_build - create message using specified header and data
 *
 * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
 *
 * Returns message data size or errno
 */
int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
		   unsigned int len, int max_size, struct sk_buff **buf)
{
	int dsz, sz, hsz;
	unsigned char *to;

	dsz = len;
	hsz = msg_hdr_sz(hdr);
	sz = hsz + dsz;
	msg_set_size(hdr, sz);
	if (unlikely(sz > max_size)) {
		*buf = NULL;
		return dsz;
	}

	*buf = tipc_buf_acquire(sz);
	if (!(*buf))
		return -ENOMEM;
	skb_copy_to_linear_data(*buf, hdr, hsz);
	to = (*buf)->data + hsz;
	if (len && memcpy_fromiovecend(to, msg_sect, 0, dsz)) {
		kfree_skb(*buf);
		*buf = NULL;
		return -EFAULT;
	}
	return dsz;
}

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * The first buffer becomes the head buffer
 * Returns 1 and sets *buf to the head buffer if the chain is complete, otherwise 0
 * Leaves the headbuf pointer at NULL on failure
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail;
	struct tipc_msg *msg = buf_msg(frag);
	u32 fragid = msg_type(msg);
	bool headstolen;
	int delta;

	skb_pull(frag, msg_hdr_sz(msg));

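	/* A FIRST_FRAGMENT starts a new chain: there must be no head buffer
	 * in progress yet, and the fragment must own its data so a fragment
	 * list can be attached to it.
	 */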
	if (fragid == FIRST_FRAGMENT) {
		if (head || skb_unclone(frag, GFP_ATOMIC))
			goto out_free;
		head = *headbuf = frag;
		skb_frag_list_init(head);
		return 0;
	}
	if (!head)
		goto out_free;
	tail = TIPC_SKB_CB(head)->tail;
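	/* Merge the fragment into the head buffer if possible; otherwise
	 * chain it onto the head's fragment list.
	 */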
	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}
	if (fragid == LAST_FRAGMENT) {
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
out_free:
	pr_warn_ratelimited("Unable to build fragment list\n");
	kfree_skb(*buf);
	return 0;
}

/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @bbuf: the existing buffer ("bundle")
 * @buf: buffer to be appended
 * @mtu: max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
{
	struct tipc_msg *bmsg = buf_msg(bbuf);
	struct tipc_msg *msg = buf_msg(buf);
	unsigned int bsz = msg_size(bmsg);
	unsigned int msz = msg_size(msg);
	u32 start = align(bsz);
	u32 max = mtu - INT_H_SIZE;
	u32 pad = start - bsz;

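	/* Fragments, changeover and broadcast protocol messages are never
	 * bundled; the destination must be an open bundle with enough room
	 * for the padded message within the link MTU.
	 */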
	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (likely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (likely(msg_type(bmsg) != BUNDLE_OPEN))
		return false;
	if (unlikely(skb_tailroom(bbuf) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;

	skb_put(bbuf, pad + msz);
	skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz);
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	bbuf->next = buf->next;
	kfree_skb(buf);
	return true;
}

/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @buf: buffer to be appended and replaced
 * @mtu: max allowable size for the bundle buffer, including the header
 * @dnode: destination node for message (not always present in header)
 * Replaces buffer if successful
 * Returns true if successful, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
{
	struct sk_buff *bbuf;
	struct tipc_msg *bmsg;
	struct tipc_msg *msg = buf_msg(*buf);
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == CHANGEOVER_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (msz > (max / 2))
		return false;

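	/* Create a new bundle buffer, give it a bundler header carrying the
	 * original message's sequence and ack numbers, and move the message
	 * into it via tipc_msg_bundle().
	 */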
	bbuf = tipc_buf_acquire(max);
	if (!bbuf)
		return false;

	skb_trim(bbuf, INT_H_SIZE);
	bmsg = buf_msg(bbuf);
	tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode);
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
	bbuf->next = (*buf)->next;
	tipc_msg_bundle(bbuf, *buf, mtu);
	*buf = bbuf;
	return true;
}