/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement sctp diag support.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING. If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>

static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                               void *info);

/* define some functions to make asoc/ep fill look clean */
static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
                                        struct sock *sk,
                                        struct sctp_association *asoc)
{
        union sctp_addr laddr, paddr;
        struct dst_entry *dst;
        struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;

        laddr = list_entry(asoc->base.bind_addr.address_list.next,
                           struct sctp_sockaddr_entry, list)->a;
        paddr = asoc->peer.primary_path->ipaddr;
        dst = asoc->peer.primary_path->dst;

        r->idiag_family = sk->sk_family;
        r->id.idiag_sport = htons(asoc->base.bind_addr.port);
        r->id.idiag_dport = htons(asoc->peer.port);
        r->id.idiag_if = dst ? dst->dev->ifindex : 0;
        sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6) {
                *(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
                *(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
        } else
#endif
        {
                memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
                memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

                r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
                r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
        }

        r->idiag_state = asoc->state;
        if (timer_pending(t3_rtx)) {
                r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
                r->idiag_retrans = asoc->rtx_data_chunks;
                r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
        } else {
                r->idiag_timer = 0;
                r->idiag_retrans = 0;
                r->idiag_expires = 0;
        }
}

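/* Put the local (bound) addresses of an endpoint or association into one
 * INET_DIAG_LOCALS attribute; each address is zero-padded to
 * sizeof(struct sockaddr_storage) so userspace can treat the payload as
 * an array of fixed-size entries.
 */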
static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
                                         struct list_head *address_list)
{
        struct sctp_sockaddr_entry *laddr;
        int addrlen = sizeof(struct sockaddr_storage);
        int addrcnt = 0;
        struct nlattr *attr;
        void *info = NULL;

        list_for_each_entry_rcu(laddr, address_list, list)
                addrcnt++;

        attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
        if (!attr)
                return -EMSGSIZE;

        info = nla_data(attr);
        list_for_each_entry_rcu(laddr, address_list, list) {
                memcpy(info, &laddr->a, sizeof(laddr->a));
                memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
                info += addrlen;
        }

        return 0;
}

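/* Put the association's peer transport addresses into one INET_DIAG_PEERS
 * attribute, padded the same way as the local addresses above.
 */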
static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
                                        struct sctp_association *asoc)
{
        int addrlen = sizeof(struct sockaddr_storage);
        struct sctp_transport *from;
        struct nlattr *attr;
        void *info = NULL;

        attr = nla_reserve(skb, INET_DIAG_PEERS,
                           addrlen * asoc->peer.transport_count);
        if (!attr)
                return -EMSGSIZE;

        info = nla_data(attr);
        list_for_each_entry(from, &asoc->peer.transport_addr_list,
                            transports) {
                memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
                memset(info + sizeof(from->ipaddr), 0,
                       addrlen - sizeof(from->ipaddr));
                info += addrlen;
        }

        return 0;
}

/* sctp asoc/ep fill: build one inet_diag_msg for either an association
 * (asoc != NULL) or a bare endpoint (asoc == NULL), then append the
 * requested optional attributes.
 */
static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
                               struct sk_buff *skb,
                               const struct inet_diag_req_v2 *req,
                               struct user_namespace *user_ns,
                               int portid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh,
                               bool net_admin)
{
        struct sctp_endpoint *ep = sctp_sk(sk)->ep;
        struct list_head *addr_list;
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
        int ext = req->idiag_ext;
        struct sctp_infox infox;
        void *info = NULL;

        nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
        if (!nlh)
                return -EMSGSIZE;

        r = nlmsg_data(nlh);
        BUG_ON(!sk_fullsock(sk));

        if (asoc) {
                inet_diag_msg_sctpasoc_fill(r, sk, asoc);
        } else {
                inet_diag_msg_common_fill(r, sk);
                r->idiag_state = sk->sk_state;
                r->idiag_timer = 0;
                r->idiag_retrans = 0;
        }

        if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
                goto errout;

        if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
                u32 mem[SK_MEMINFO_VARS];
                int amt;

                if (asoc && asoc->ep->sndbuf_policy)
                        amt = asoc->sndbuf_used;
                else
                        amt = sk_wmem_alloc_get(sk);
                mem[SK_MEMINFO_WMEM_ALLOC] = amt;
                if (asoc && asoc->ep->rcvbuf_policy)
                        amt = atomic_read(&asoc->rmem_alloc);
                else
                        amt = sk_rmem_alloc_get(sk);
                mem[SK_MEMINFO_RMEM_ALLOC] = amt;
                mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
                mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
                mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
                mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
                mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
                mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
                mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);

                if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
                        goto errout;
        }

        if (ext & (1 << (INET_DIAG_INFO - 1))) {
                struct nlattr *attr;

                attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
                                         sizeof(struct sctp_info),
                                         INET_DIAG_PAD);
                if (!attr)
                        goto errout;

                info = nla_data(attr);
        }
        infox.sctpinfo = (struct sctp_info *)info;
        infox.asoc = asoc;
        sctp_diag_get_info(sk, r, &infox);

        addr_list = asoc ? &asoc->base.bind_addr.address_list
                         : &ep->base.bind_addr.address_list;
        if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
                goto errout;

        if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
                if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
                        goto errout;

        if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
                goto errout;

        nlmsg_end(skb, nlh);
        return 0;

errout:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

/* callback and param */
struct sctp_comm_param {
        struct sk_buff *skb;
        struct netlink_callback *cb;
        const struct inet_diag_req_v2 *r;
        const struct nlmsghdr *nlh;
        bool net_admin;
};

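/* Upper-bound estimate of the netlink attribute space needed to dump one
 * association; used to size the reply skb in sctp_tsp_dump_one().
 */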
static size_t inet_assoc_attr_size(struct sctp_association *asoc)
{
        int addrlen = sizeof(struct sockaddr_storage);
        int addrcnt = 0;
        struct sctp_sockaddr_entry *laddr;

        list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
                                list)
                addrcnt++;

        return    nla_total_size(sizeof(struct sctp_info))
                + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
                + nla_total_size(1) /* INET_DIAG_TOS */
                + nla_total_size(1) /* INET_DIAG_TCLASS */
                + nla_total_size(4) /* INET_DIAG_MARK */
                + nla_total_size(4) /* INET_DIAG_CLASS_ID */
                + nla_total_size(addrlen * asoc->peer.transport_count)
                + nla_total_size(addrlen * addrcnt)
                + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(sizeof(struct inet_diag_msg))
                + 64;
}

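/* Dump exactly one association for a "dump one" request: verify the socket
 * cookie from the request, fill a freshly allocated reply skb, and unicast
 * it back to the requesting socket.
 */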
static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
{
        struct sctp_association *assoc = tsp->asoc;
        struct sock *sk = tsp->asoc->base.sk;
        struct sctp_comm_param *commp = p;
        struct sk_buff *in_skb = commp->skb;
        const struct inet_diag_req_v2 *req = commp->r;
        const struct nlmsghdr *nlh = commp->nlh;
        struct net *net = sock_net(in_skb->sk);
        struct sk_buff *rep;
        int err;

        err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
        if (err)
                goto out;

        err = -ENOMEM;
        rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
        if (!rep)
                goto out;

        lock_sock(sk);
        if (sk != assoc->base.sk) {
                release_sock(sk);
                sk = assoc->base.sk;
                lock_sock(sk);
        }
        err = inet_sctp_diag_fill(sk, assoc, rep, req,
                                  sk_user_ns(NETLINK_CB(in_skb).sk),
                                  NETLINK_CB(in_skb).portid,
                                  nlh->nlmsg_seq, 0, nlh,
                                  commp->net_admin);
        release_sock(sk);
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(rep);
                goto out;
        }

        err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
out:
        return err;
}

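/* Dump every association hanging off the endpoint that owns this transport;
 * the parent endpoint record is emitted once (tracked by cb->args[3]) before
 * the per-association records.
 */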
static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
{
        struct sctp_endpoint *ep = tsp->asoc->ep;
        struct sctp_comm_param *commp = p;
        struct sock *sk = ep->base.sk;
        struct sk_buff *skb = commp->skb;
        struct netlink_callback *cb = commp->cb;
        const struct inet_diag_req_v2 *r = commp->r;
        struct sctp_association *assoc;
        int err = 0;

        lock_sock(sk);
        list_for_each_entry(assoc, &ep->asocs, asocs) {
                if (cb->args[4] < cb->args[1])
                        goto next;

                if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
                    r->id.idiag_sport)
                        goto next;
                if (r->id.idiag_dport != htons(assoc->peer.port) &&
                    r->id.idiag_dport)
                        goto next;

                if (!cb->args[3] &&
                    inet_sctp_diag_fill(sk, NULL, skb, r,
                                        sk_user_ns(NETLINK_CB(cb->skb).sk),
                                        NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq,
                                        NLM_F_MULTI, cb->nlh,
                                        commp->net_admin) < 0) {
                        err = 1;
                        goto release;
                }
                cb->args[3] = 1;

                if (inet_sctp_diag_fill(sk, assoc, skb, r,
                                        sk_user_ns(NETLINK_CB(cb->skb).sk),
                                        NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, 0, cb->nlh,
                                        commp->net_admin) < 0) {
                        err = 1;
                        goto release;
                }
next:
                cb->args[4]++;
        }
        cb->args[1] = 0;
        cb->args[3] = 0;
        cb->args[4] = 0;
release:
        release_sock(sk);
        return err;
}

static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
{
        struct sctp_endpoint *ep = tsp->asoc->ep;
        struct sctp_comm_param *commp = p;
        struct sock *sk = ep->base.sk;
        const struct inet_diag_req_v2 *r = commp->r;
        struct sctp_association *assoc =
                list_entry(ep->asocs.next, struct sctp_association, asocs);

        /* find the ep only once through the transports by this condition */
        if (tsp->asoc != assoc)
                return 0;

        if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
                return 0;

        return 1;
}

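/* Dump one endpoint from the endpoint hashtable, applying the namespace,
 * state, family and port filters carried in the request.
 */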
static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
{
        struct sctp_comm_param *commp = p;
        struct sock *sk = ep->base.sk;
        struct sk_buff *skb = commp->skb;
        struct netlink_callback *cb = commp->cb;
        const struct inet_diag_req_v2 *r = commp->r;
        struct net *net = sock_net(skb->sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = 0;

        if (!net_eq(sock_net(sk), net))
                goto out;

        if (cb->args[4] < cb->args[1])
                goto next;

        if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
                goto next;

        if (r->sdiag_family != AF_UNSPEC &&
            sk->sk_family != r->sdiag_family)
                goto next;

        if (r->id.idiag_sport != inet->inet_sport &&
            r->id.idiag_sport)
                goto next;

        if (r->id.idiag_dport != inet->inet_dport &&
            r->id.idiag_dport)
                goto next;

        if (inet_sctp_diag_fill(sk, NULL, skb, r,
                                sk_user_ns(NETLINK_CB(cb->skb).sk),
                                NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                cb->nlh, commp->net_admin) < 0) {
                err = 2;
                goto out;
        }
next:
        cb->args[4]++;
out:
        return err;
}

/* define the functions for sctp_diag_handler*/
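/* Fill the queue counters for the reply: receive/send memory in use for an
 * association, accept backlog counters for a listening socket; also fill
 * struct sctp_info when INET_DIAG_INFO was requested.
 */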
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                               void *info)
{
        struct sctp_infox *infox = (struct sctp_infox *)info;

        if (infox->asoc) {
                r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
                r->idiag_wqueue = infox->asoc->sndbuf_used;
        } else {
                r->idiag_rqueue = sk->sk_ack_backlog;
                r->idiag_wqueue = sk->sk_max_ack_backlog;
        }
        if (infox->sctpinfo)
                sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
}

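/* Handle a "dump one" request: rebuild the local and peer addresses from the
 * request id and look up the matching transport, replying through
 * sctp_tsp_dump_one().
 */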
static int sctp_diag_dump_one(struct sk_buff *in_skb,
                              const struct nlmsghdr *nlh,
                              const struct inet_diag_req_v2 *req)
{
        struct net *net = sock_net(in_skb->sk);
        union sctp_addr laddr, paddr;
        struct sctp_comm_param commp = {
                .skb = in_skb,
                .r = req,
                .nlh = nlh,
                .net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
        };

        if (req->sdiag_family == AF_INET) {
                laddr.v4.sin_port = req->id.idiag_sport;
                laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
                laddr.v4.sin_family = AF_INET;

                paddr.v4.sin_port = req->id.idiag_dport;
                paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
                paddr.v4.sin_family = AF_INET;
        } else {
                laddr.v6.sin6_port = req->id.idiag_sport;
                memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
                       sizeof(laddr.v6.sin6_addr));
                laddr.v6.sin6_family = AF_INET6;

                paddr.v6.sin6_port = req->id.idiag_dport;
                memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
                       sizeof(paddr.v6.sin6_addr));
                paddr.v6.sin6_family = AF_INET6;
        }

        return sctp_transport_lookup_process(sctp_tsp_dump_one,
                                             net, &laddr, &paddr, &commp);
}

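/* Handle a full dump in two passes: the endpoint hashtable for listening
 * sockets first, then the transport hashtable for established associations;
 * cb->args[] carries the resume state between netlink callbacks.
 */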
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                           const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
        u32 idiag_states = r->idiag_states;
        struct net *net = sock_net(skb->sk);
        struct sctp_comm_param commp = {
                .skb = skb,
                .cb = cb,
                .r = r,
                .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
        };
        int pos = cb->args[2];

        /* eps hashtable dump
         * args:
         * 0 : whether the listening sockets have already been traversed
         * 1 : the sock position reached in the previous traversal
         * 4 : temporary counter used while traversing the list
         */
        if (cb->args[0] == 0) {
                if (!(idiag_states & TCPF_LISTEN))
                        goto skip;
                if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
                        goto done;
skip:
                cb->args[0] = 1;
                cb->args[1] = 0;
                cb->args[4] = 0;
        }

        /* asocs by transport hashtable dump
         * args:
         * 1 : the assoc position reached in the previous traversal
         * 2 : the transport position reached in the previous traversal
         * 3 : whether the ep info of the current asoc has been dumped
         * 4 : temporary counter used while traversing the list
         * 5 : the sk obtained while traversing the tsp list
         */
        if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
                goto done;

        sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
                                net, &pos, &commp);
        cb->args[2] = pos;

done:
        cb->args[1] = cb->args[4];
        cb->args[4] = 0;
}

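/* Glue into the inet_diag core: NETLINK_SOCK_DIAG requests with
 * sdiag_protocol == IPPROTO_SCTP are routed to these callbacks.
 */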
static const struct inet_diag_handler sctp_diag_handler = {
        .dump            = sctp_diag_dump,
        .dump_one        = sctp_diag_dump_one,
        .idiag_get_info  = sctp_diag_get_info,
        .idiag_type      = IPPROTO_SCTP,
        .idiag_info_size = sizeof(struct sctp_info),
};

static int __init sctp_diag_init(void)
{
        return inet_diag_register(&sctp_diag_handler);
}

static void __exit sctp_diag_exit(void)
{
        inet_diag_unregister(&sctp_diag_handler);
}

module_init(sctp_diag_init);
module_exit(sctp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);