/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/* Round @i up to the next multiple of 4 (TIPC messages are word-aligned). */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
45 void tipc_msg_init(struct tipc_msg
*m
, u32 user
, u32 type
, u32 hsize
,
50 msg_set_user(m
, user
);
51 msg_set_hdr_sz(m
, hsize
);
52 msg_set_size(m
, hsize
);
53 msg_set_prevnode(m
, tipc_own_addr
);
54 msg_set_type(m
, type
);
55 msg_set_orignode(m
, tipc_own_addr
);
56 msg_set_destnode(m
, destnode
);
60 * tipc_msg_build - create message using specified header and data
62 * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
64 * Returns message data size or errno
66 int tipc_msg_build(struct tipc_msg
*hdr
, struct iovec
const *msg_sect
,
67 unsigned int len
, int max_size
, struct sk_buff
**buf
)
73 hsz
= msg_hdr_sz(hdr
);
75 msg_set_size(hdr
, sz
);
76 if (unlikely(sz
> max_size
)) {
81 *buf
= tipc_buf_acquire(sz
);
84 skb_copy_to_linear_data(*buf
, hdr
, hsz
);
85 to
= (*buf
)->data
+ hsz
;
86 if (len
&& memcpy_fromiovecend(to
, msg_sect
, 0, dsz
)) {
94 /* tipc_buf_append(): Append a buffer to the fragment list of another buffer
95 * Let first buffer become head buffer
96 * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
97 * Leaves headbuf pointer at NULL if failure
99 int tipc_buf_append(struct sk_buff
**headbuf
, struct sk_buff
**buf
)
101 struct sk_buff
*head
= *headbuf
;
102 struct sk_buff
*frag
= *buf
;
103 struct sk_buff
*tail
;
104 struct tipc_msg
*msg
= buf_msg(frag
);
105 u32 fragid
= msg_type(msg
);
109 skb_pull(frag
, msg_hdr_sz(msg
));
111 if (fragid
== FIRST_FRAGMENT
) {
112 if (head
|| skb_unclone(frag
, GFP_ATOMIC
))
114 head
= *headbuf
= frag
;
115 skb_frag_list_init(head
);
120 tail
= TIPC_SKB_CB(head
)->tail
;
121 if (skb_try_coalesce(head
, frag
, &headstolen
, &delta
)) {
122 kfree_skb_partial(frag
, headstolen
);
124 if (!skb_has_frag_list(head
))
125 skb_shinfo(head
)->frag_list
= frag
;
128 head
->truesize
+= frag
->truesize
;
129 head
->data_len
+= frag
->len
;
130 head
->len
+= frag
->len
;
131 TIPC_SKB_CB(head
)->tail
= frag
;
133 if (fragid
== LAST_FRAGMENT
) {
135 TIPC_SKB_CB(head
)->tail
= NULL
;
142 pr_warn_ratelimited("Unable to build fragment list\n");
149 * tipc_msg_build2 - create buffer chain containing specified header and data
150 * @mhdr: Message header, to be prepended to data
152 * @offset: Posision in iov to start copying from
153 * @dsz: Total length of user data
154 * @pktmax: Max packet size that can be used
155 * @chain: Buffer or chain of buffers to be returned to caller
156 * Returns message data size or errno: -ENOMEM, -EFAULT
158 int tipc_msg_build2(struct tipc_msg
*mhdr
, struct iovec
const *iov
,
159 int offset
, int dsz
, int pktmax
, struct sk_buff
**chain
)
161 int mhsz
= msg_hdr_sz(mhdr
);
162 int msz
= mhsz
+ dsz
;
167 struct tipc_msg pkthdr
;
168 struct sk_buff
*buf
, *prev
;
172 msg_set_size(mhdr
, msz
);
174 /* No fragmentation needed? */
175 if (likely(msz
<= pktmax
)) {
176 buf
= tipc_buf_acquire(msz
);
180 skb_copy_to_linear_data(buf
, mhdr
, mhsz
);
181 pktpos
= buf
->data
+ mhsz
;
182 if (!dsz
|| !memcpy_fromiovecend(pktpos
, iov
, offset
, dsz
))
188 /* Prepare reusable fragment header */
189 tipc_msg_init(&pkthdr
, MSG_FRAGMENTER
, FIRST_FRAGMENT
,
190 INT_H_SIZE
, msg_destnode(mhdr
));
191 msg_set_size(&pkthdr
, pktmax
);
192 msg_set_fragm_no(&pkthdr
, pktno
);
194 /* Prepare first fragment */
195 *chain
= buf
= tipc_buf_acquire(pktmax
);
199 skb_copy_to_linear_data(buf
, &pkthdr
, INT_H_SIZE
);
200 pktpos
+= INT_H_SIZE
;
201 pktrem
-= INT_H_SIZE
;
202 skb_copy_to_linear_data_offset(buf
, INT_H_SIZE
, mhdr
, mhsz
);
210 if (memcpy_fromiovecend(pktpos
, iov
, offset
, pktrem
)) {
220 /* Prepare new fragment: */
221 if (drem
< (pktmax
- INT_H_SIZE
))
222 pktsz
= drem
+ INT_H_SIZE
;
226 buf
= tipc_buf_acquire(pktsz
);
232 msg_set_type(&pkthdr
, FRAGMENT
);
233 msg_set_size(&pkthdr
, pktsz
);
234 msg_set_fragm_no(&pkthdr
, ++pktno
);
235 skb_copy_to_linear_data(buf
, &pkthdr
, INT_H_SIZE
);
236 pktpos
= buf
->data
+ INT_H_SIZE
;
237 pktrem
= pktsz
- INT_H_SIZE
;
241 msg_set_type(buf_msg(buf
), LAST_FRAGMENT
);
244 kfree_skb_list(*chain
);
250 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
251 * @bbuf: the existing buffer ("bundle")
252 * @buf: buffer to be appended
253 * @mtu: max allowable size for the bundle buffer
254 * Consumes buffer if successful
255 * Returns true if bundling could be performed, otherwise false
257 bool tipc_msg_bundle(struct sk_buff
*bbuf
, struct sk_buff
*buf
, u32 mtu
)
259 struct tipc_msg
*bmsg
= buf_msg(bbuf
);
260 struct tipc_msg
*msg
= buf_msg(buf
);
261 unsigned int bsz
= msg_size(bmsg
);
262 unsigned int msz
= msg_size(msg
);
263 u32 start
= align(bsz
);
264 u32 max
= mtu
- INT_H_SIZE
;
265 u32 pad
= start
- bsz
;
267 if (likely(msg_user(msg
) == MSG_FRAGMENTER
))
269 if (unlikely(msg_user(msg
) == CHANGEOVER_PROTOCOL
))
271 if (unlikely(msg_user(msg
) == BCAST_PROTOCOL
))
273 if (likely(msg_user(bmsg
) != MSG_BUNDLER
))
275 if (likely(msg_type(bmsg
) != BUNDLE_OPEN
))
277 if (unlikely(skb_tailroom(bbuf
) < (pad
+ msz
)))
279 if (unlikely(max
< (start
+ msz
)))
282 skb_put(bbuf
, pad
+ msz
);
283 skb_copy_to_linear_data_offset(bbuf
, start
, buf
->data
, msz
);
284 msg_set_size(bmsg
, start
+ msz
);
285 msg_set_msgcnt(bmsg
, msg_msgcnt(bmsg
) + 1);
286 bbuf
->next
= buf
->next
;
292 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
293 * @buf: buffer to be appended and replaced
294 * @mtu: max allowable size for the bundle buffer, inclusive header
295 * @dnode: destination node for message. (Not always present in header)
296 * Replaces buffer if successful
297 * Returns true if sucess, otherwise false
299 bool tipc_msg_make_bundle(struct sk_buff
**buf
, u32 mtu
, u32 dnode
)
301 struct sk_buff
*bbuf
;
302 struct tipc_msg
*bmsg
;
303 struct tipc_msg
*msg
= buf_msg(*buf
);
304 u32 msz
= msg_size(msg
);
305 u32 max
= mtu
- INT_H_SIZE
;
307 if (msg_user(msg
) == MSG_FRAGMENTER
)
309 if (msg_user(msg
) == CHANGEOVER_PROTOCOL
)
311 if (msg_user(msg
) == BCAST_PROTOCOL
)
316 bbuf
= tipc_buf_acquire(max
);
320 skb_trim(bbuf
, INT_H_SIZE
);
321 bmsg
= buf_msg(bbuf
);
322 tipc_msg_init(bmsg
, MSG_BUNDLER
, BUNDLE_OPEN
, INT_H_SIZE
, dnode
);
323 msg_set_seqno(bmsg
, msg_seqno(msg
));
324 msg_set_ack(bmsg
, msg_ack(msg
));
325 msg_set_bcast_ack(bmsg
, msg_bcast_ack(msg
));
326 bbuf
->next
= (*buf
)->next
;
327 tipc_msg_bundle(bbuf
, *buf
, mtu
);