/*
 * NETLINK	Kernel-user communication protocol.
 *
 * Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *	added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *	use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *	- inc module use count of module that owns
 *	  the kernel socket in case userspace opens
 *	  socket of same protocol
 *	- remove all module support, since netlink is
 *	  mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
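
/*
 * Worked example of the two macros above (compile-time arithmetic only):
 * on a 64-bit build NLGRPSZ(32) rounds 32 group bits up to one unsigned
 * long, i.e. 8 bytes, so NLGRPLONGS(32) is 1; on a 32-bit build NLGRPSZ(32)
 * is 4 bytes and NLGRPLONGS(32) is still 1.
 */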

struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock sk;
	u32 pid;
	u32 dst_pid;
	u32 dst_group;
	u32 flags;
	u32 subscriptions;
	u32 ngroups;
	unsigned long *groups;
	unsigned long state;
	wait_queue_head_t wait;
	struct netlink_callback *cb;
	struct mutex *cb_mutex;
	struct mutex cb_def_mutex;
	void (*netlink_rcv)(struct sk_buff *skb);
	struct module *module;
};

struct listeners_rcu_head {
	struct rcu_head rcu_head;
	void *ptr;
};

#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2
#define NETLINK_BROADCAST_SEND_ERROR	0x4
#define NETLINK_RECV_NO_ENOBUFS	0x8

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return container_of(sk, struct netlink_sock, sk);
}

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct nl_pid_hash {
	struct hlist_head *table;
	unsigned long rehash_time;

	unsigned int mask;
	unsigned int shift;

	unsigned int entries;
	unsigned int max_shift;

	u32 rnd;
};

struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
	unsigned long *listeners;
	unsigned int nl_nonroot;
	unsigned int groups;
	struct mutex *cb_mutex;
	struct module *module;
	int registered;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
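
/*
 * Summary of the scheme above: lookups call netlink_lock_table(), which
 * takes nl_table_lock for reading only long enough to synchronize with a
 * grabber and then holds the nl_table_users count while the tables are
 * walked. netlink_table_grab() takes the write lock and sleeps until that
 * count drains to zero, so it must not be used from atomic context (hence
 * the might_sleep() above).
 */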

static inline struct sock *netlink_lookup(struct net *net, int protocol,
					  u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}

static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					 get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_zalloc(size);
	if (!table)
		return 0;

	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		tbl->listeners[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex)
		nlk->cb_mutex = cb_mutex;
	else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid) {
		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.pid = nlk->pid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (netlink_is_kernel(sk)) {
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			kfree(nl_table[sk->sk_protocol].listeners);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].registered = 0;
		}
	} else if (nlk->subscriptions)
		netlink_update_listeners(sk);
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	sock_put(sk);
	return 0;
}

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (!net_eq(sock_net(osk), net))
			continue;
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, net, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}
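
/*
 * Userspace view (illustrative sketch, not part of this file): a process
 * binds a netlink socket with a struct sockaddr_nl; nl_pid 0 requests the
 * kernel autobind path above, and nl_groups is a 32-bit multicast mask.
 * NETLINK_ROUTE/RTMGRP_LINK are just one example of a family and group:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = 0,			// let the kernel pick a pid
 *		.nl_groups = RTMGRP_LINK,	// subscribe to link events
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */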

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state = NETLINK_UNCONNECTED;
		nlk->dst_pid = 0;
		nlk->dst_group = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state = NETLINK_CONNECTED;
		nlk->dst_pid = nladdr->nl_pid;
		nlk->dst_group = ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_pid != nlk_sk(ssk)->pid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all error
 * checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		skb_set_owner_r(skb, sk);
		nlk->netlink_rcv(skb);
	}
	kfree_skb(skb);
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
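
/*
 * Typical kernel-side use (illustrative sketch): a reply built with the
 * nlmsg helpers is unicast back to the requesting socket using the pid
 * recorded in the request's control block; nl_sk, reply and request_skb
 * are placeholder names, not defined in this file:
 *
 *	netlink_unicast(nl_sk, reply, NETLINK_CB(request_skb).pid,
 *			MSG_DONTWAIT);
 */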
935 | ||
936 | int netlink_has_listeners(struct sock *sk, unsigned int group) | |
937 | { | |
938 | int res = 0; | |
939 | unsigned long *listeners; | |
940 | ||
941 | BUG_ON(!netlink_is_kernel(sk)); | |
942 | ||
943 | rcu_read_lock(); | |
944 | listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); | |
945 | ||
946 | if (group - 1 < nl_table[sk->sk_protocol].groups) | |
947 | res = test_bit(group - 1, listeners); | |
948 | ||
949 | rcu_read_unlock(); | |
950 | ||
951 | return res; | |
952 | } | |
953 | EXPORT_SYMBOL_GPL(netlink_has_listeners); | |
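
/*
 * Callers typically use this as a cheap test before building an expensive
 * notification skb; a hedged sketch, with nl_sk and MY_GRP as placeholder
 * names only:
 *
 *	if (!netlink_has_listeners(nl_sk, MY_GRP))
 *		return;		// nobody subscribed, skip the allocation
 */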

static inline int netlink_broadcast_deliver(struct sock *sk,
					    struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 pid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (!net_eq(sock_net(sk), p->net))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	kfree_skb(info.skb2);

	if (info.delivery_failure)
		return -ENOBUFS;

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast);
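
/*
 * Illustrative caller sketch (nl_sk and MY_GRP are placeholder names): the
 * caller hands over its skb reference and distinguishes "nobody listening"
 * from a real delivery failure, which is only reported when a listener set
 * the NETLINK_BROADCAST_ERROR option:
 *
 *	err = netlink_broadcast(nl_sk, skb, 0, MY_GRP, GFP_KERNEL);
 *	if (err == -ESRCH)
 *		; // no subscribers, usually not an error for the caller
 */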

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @pid: the PID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);

/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
			clear_bit(0, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else
			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
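
/*
 * Userspace counterpart (illustrative sketch): these options are set on a
 * netlink socket at the SOL_NETLINK level, e.g. to request per-packet
 * origin information as a NETLINK_PKTINFO control message:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_NETLINK, NETLINK_PKTINFO, &on, sizeof(on));
 */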

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid = nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current);
	NETLINK_CB(skb).sessionid = audit_get_sessionid(current);
	security_task_getsecid(current, &(NETLINK_CB(skb).sid));
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so that
	   we will have to save current capabilities to
	   check them, when this message will be delivered
	   to corresponding kernel module. --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
	return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *frag __maybe_unused = NULL;
	int err;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		bool need_compat = !!(flags & MSG_CMSG_COMPAT);

		/*
		 * If this skb has a frag_list, it means that we have to use
		 * the frag_list skb for compat tasks and the regular skb
		 * for non-compat tasks.
		 *
		 * The skb might (and likely will) be cloned, so we can't
		 * just reset frag_list and go on with things -- we need to
		 * keep that. For the compat case that's easy -- simply get
		 * a reference to the compat skb and free the regular one
		 * including the frag. For the non-compat case, we need to
		 * avoid sending the frag to the user -- so assign NULL but
		 * restore it below before freeing the skb.
		 */
		if (need_compat) {
			struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
			skb_get(compskb);
			kfree_skb(skb);
			skb = compskb;
		} else {
			frag = skb_shinfo(skb)->frag_list;
			skb_shinfo(skb)->frag_list = NULL;
		}
	}
#endif

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).pid;
		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	skb_shinfo(skb)->frag_list = frag;
#endif

	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
	BUG();
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
netlink_kernel_create(struct net *net, int unit, unsigned int groups,
		      void (*input)(struct sk_buff *skb),
		      struct mutex *cb_mutex, struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	unsigned long *listeners = NULL;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and then move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (groups < 32)
		groups = 32;

	listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
			    GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->netlink_rcv = input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		nl_table[unit].listeners = listeners;
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(netlink_kernel_create);
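
/*
 * Illustrative sketch (not part of this file) of how a subsystem creates
 * its kernel-side socket; NETLINK_EXAMPLE, my_input() and my_msg_handler()
 * are hypothetical names used only for this example:
 *
 *	static void my_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_msg_handler);
 *	}
 *
 *	nl_sk = netlink_kernel_create(&init_net, NETLINK_EXAMPLE, 0,
 *				      my_input, NULL, THIS_MODULE);
 *	// a NULL return means the socket could not be created or registered
 */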

void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);


static void netlink_free_old_listeners(struct rcu_head *rcu_head)
{
	struct listeners_rcu_head *lrh;

	lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head);
	kfree(lrh->ptr);
}

int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	unsigned long *listeners, *old = NULL;
	struct listeners_rcu_head *old_rcu_head;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		listeners = kzalloc(NLGRPSZ(groups) +
				    sizeof(struct listeners_rcu_head),
				    GFP_ATOMIC);
		if (!listeners)
			return -ENOMEM;
		old = tbl->listeners;
		memcpy(listeners, old, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, listeners);
		/*
		 * Free the old memory after an RCU grace period so we
		 * don't leak it. We use call_rcu() here in order to be
		 * able to call this function from atomic contexts. The
		 * allocation of this memory will have reserved enough
		 * space for struct listeners_rcu_head at the end.
		 */
		old_rcu_head = (void *)(tbl->listeners +
					NLGRPLONGS(tbl->groups));
		old_rcu_head->ptr = old;
		call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners);
	}
	tbl->groups = groups;

	return 0;
}

/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}

void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct hlist_node *node;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, node, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	netlink_table_grab();
	__netlink_clear_multicast_users(ksk, group);
	netlink_table_ungrab();
}

void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}
EXPORT_SYMBOL(netlink_set_nonroot);

static void netlink_destroy_callback(struct netlink_callback *cb)
{
	kfree_skb(cb->skb);
	kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		goto errout;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else {
			skb_queue_tail(&sk->sk_receive_queue, skb);
			sk->sk_data_ready(sk, skb->len);
		}
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else {
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
	}

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	netlink_destroy_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
errout:
	return err;
}

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       const struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb,
				   struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	mutex_lock(nlk->cb_mutex);
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	netlink_dump(sk);
	sock_put(sk);

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;
}
EXPORT_SYMBOL(netlink_dump_start);
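
/*
 * Illustrative sketch: a request handler that sees NLM_F_DUMP typically
 * just forwards to netlink_dump_start() and propagates its return value,
 * so the -EINTR above suppresses the automatic ACK in netlink_rcv_skb().
 * nl_sk, my_dump and my_done are hypothetical names, not defined here:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP)
 *		return netlink_dump_start(nl_sk, skb, nlh, my_dump, my_done);
 */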
1783 | ||
1784 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) | |
1785 | { | |
1786 | struct sk_buff *skb; | |
1787 | struct nlmsghdr *rep; | |
1788 | struct nlmsgerr *errmsg; | |
1789 | size_t payload = sizeof(*errmsg); | |
1790 | ||
1791 | /* error messages get the original request appended */ | |
1792 | if (err) | |
1793 | payload += nlmsg_len(nlh); | |
1794 | ||
1795 | skb = nlmsg_new(payload, GFP_KERNEL); | |
1796 | if (!skb) { | |
1797 | struct sock *sk; | |
1798 | ||
1799 | sk = netlink_lookup(sock_net(in_skb->sk), | |
1800 | in_skb->sk->sk_protocol, | |
1801 | NETLINK_CB(in_skb).pid); | |
1802 | if (sk) { | |
1803 | sk->sk_err = ENOBUFS; | |
1804 | sk->sk_error_report(sk); | |
1805 | sock_put(sk); | |
1806 | } | |
1807 | return; | |
1808 | } | |
1809 | ||
1810 | rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, | |
1811 | NLMSG_ERROR, payload, 0); | |
1812 | errmsg = nlmsg_data(rep); | |
1813 | errmsg->error = err; | |
1814 | memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh)); | |
1815 | netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); | |
1816 | } | |
1817 | EXPORT_SYMBOL(netlink_ack); | |
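/*
 * Example (illustrative sketch, not part of the original file): how the
 * NLMSG_ERROR message built above is interpreted by its receiver.  The
 * my_* name is hypothetical; NLMSG_ERROR and struct nlmsgerr come from
 * <linux/netlink.h>.
 */
static int my_parse_ack(struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_type == NLMSG_ERROR) {
		struct nlmsgerr *e = nlmsg_data(nlh);

		/* error == 0 is a positive ACK and only the request header is
		 * echoed in e->msg; a negative errno means failure and the
		 * whole original request, payload included, is echoed */
		return e->error;
	}
	return 0;
}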
1818 | ||
1819 | int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, | |
1820 | struct nlmsghdr *)) | |
1821 | { | |
1822 | struct nlmsghdr *nlh; | |
1823 | int err; | |
1824 | ||
1825 | while (skb->len >= nlmsg_total_size(0)) { | |
1826 | int msglen; | |
1827 | ||
1828 | nlh = nlmsg_hdr(skb); | |
1829 | err = 0; | |
1830 | ||
1831 | if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len) | |
1832 | return 0; | |
1833 | ||
1834 | /* Only requests are handled by the kernel */ | |
1835 | if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) | |
1836 | goto ack; | |
1837 | ||
1838 | /* Skip control messages */ | |
1839 | if (nlh->nlmsg_type < NLMSG_MIN_TYPE) | |
1840 | goto ack; | |
1841 | ||
1842 | err = cb(skb, nlh); | |
1843 | if (err == -EINTR) | |
1844 | goto skip; | |
1845 | ||
1846 | ack: | |
1847 | if (nlh->nlmsg_flags & NLM_F_ACK || err) | |
1848 | netlink_ack(skb, nlh, err); | |
1849 | ||
1850 | skip: | |
1851 | msglen = NLMSG_ALIGN(nlh->nlmsg_len); | |
1852 | if (msglen > skb->len) | |
1853 | msglen = skb->len; | |
1854 | skb_pull(skb, msglen); | |
1855 | } | |
1856 | ||
1857 | return 0; | |
1858 | } | |
1859 | EXPORT_SYMBOL(netlink_rcv_skb); | |
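/*
 * Example (illustrative sketch, not part of the original file): the usual
 * shape of a kernel netlink socket's input callback, which simply feeds
 * every queued message through netlink_rcv_skb().  my_rcv_msg() is the
 * hypothetical per-message handler from the example above; ACKs and error
 * replies are generated by netlink_ack() inside netlink_rcv_skb().
 */
static void my_netlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &my_rcv_msg);
}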
1860 | ||
1861 | /** | |
1862 | * nlmsg_notify - send a notification netlink message | |
1863 | * @sk: netlink socket to use | |
1864 | * @skb: notification message | |
1865 | * @pid: destination netlink pid for reports or 0 | |
1866 | * @group: destination multicast group or 0 | |
1867 |  * @report: 1 to also unicast the message back to @pid, 0 to disable | |
1868 | * @flags: allocation flags | |
1869 | */ | |
1870 | int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid, | |
1871 | unsigned int group, int report, gfp_t flags) | |
1872 | { | |
1873 | int err = 0; | |
1874 | ||
1875 | if (group) { | |
1876 | int exclude_pid = 0; | |
1877 | ||
1878 | if (report) { | |
1879 | atomic_inc(&skb->users); | |
1880 | exclude_pid = pid; | |
1881 | } | |
1882 | ||
1883 | /* errors are reported via the destination sk->sk_err, but delivery | |
1884 |  * errors are propagated when the NETLINK_BROADCAST_ERROR flag is set */ | |
1885 | err = nlmsg_multicast(sk, skb, exclude_pid, group, flags); | |
1886 | } | |
1887 | ||
1888 | if (report) { | |
1889 | int err2; | |
1890 | ||
1891 | err2 = nlmsg_unicast(sk, skb, pid); | |
1892 | if (!err || err == -ESRCH) | |
1893 | err = err2; | |
1894 | } | |
1895 | ||
1896 | return err; | |
1897 | } | |
1898 | EXPORT_SYMBOL(nlmsg_notify); | |
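/*
 * Example (illustrative sketch, not part of the original file): a typical
 * nlmsg_notify() call from a subsystem that has built an event message in
 * reply to a request.  my_sock, my_group and the function name are
 * hypothetical; nlmsg_report() and nlmsg_hdr() are assumed to be the helpers
 * from <net/netlink.h>.
 */
static int my_send_event(struct sock *my_sock, struct sk_buff *event_skb,
			 struct sk_buff *request_skb, unsigned int my_group)
{
	/* echo the event back to the requester only if it set NLM_F_ECHO */
	int report = nlmsg_report(nlmsg_hdr(request_skb));

	return nlmsg_notify(my_sock, event_skb, NETLINK_CB(request_skb).pid,
			    my_group, report, GFP_KERNEL);
}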
1899 | ||
1900 | #ifdef CONFIG_PROC_FS | |
1901 | struct nl_seq_iter { | |
1902 | struct seq_net_private p; | |
1903 | int link; | |
1904 | int hash_idx; | |
1905 | }; | |
1906 | ||
1907 | static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) | |
1908 | { | |
1909 | struct nl_seq_iter *iter = seq->private; | |
1910 | int i, j; | |
1911 | struct sock *s; | |
1912 | struct hlist_node *node; | |
1913 | loff_t off = 0; | |
1914 | ||
1915 | for (i = 0; i < MAX_LINKS; i++) { | |
1916 | struct nl_pid_hash *hash = &nl_table[i].hash; | |
1917 | ||
1918 | for (j = 0; j <= hash->mask; j++) { | |
1919 | sk_for_each(s, node, &hash->table[j]) { | |
1920 | if (sock_net(s) != seq_file_net(seq)) | |
1921 | continue; | |
1922 | if (off == pos) { | |
1923 | iter->link = i; | |
1924 | iter->hash_idx = j; | |
1925 | return s; | |
1926 | } | |
1927 | ++off; | |
1928 | } | |
1929 | } | |
1930 | } | |
1931 | return NULL; | |
1932 | } | |
1933 | ||
1934 | static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) | |
1935 | __acquires(nl_table_lock) | |
1936 | { | |
1937 | read_lock(&nl_table_lock); | |
1938 | return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; | |
1939 | } | |
1940 | ||
1941 | static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
1942 | { | |
1943 | struct sock *s; | |
1944 | struct nl_seq_iter *iter; | |
1945 | int i, j; | |
1946 | ||
1947 | ++*pos; | |
1948 | ||
1949 | if (v == SEQ_START_TOKEN) | |
1950 | return netlink_seq_socket_idx(seq, 0); | |
1951 | ||
1952 | iter = seq->private; | |
1953 | s = v; | |
1954 | do { | |
1955 | s = sk_next(s); | |
1956 | } while (s && sock_net(s) != seq_file_net(seq)); | |
1957 | if (s) | |
1958 | return s; | |
1959 | ||
1960 | i = iter->link; | |
1961 | j = iter->hash_idx + 1; | |
1962 | ||
1963 | do { | |
1964 | struct nl_pid_hash *hash = &nl_table[i].hash; | |
1965 | ||
1966 | for (; j <= hash->mask; j++) { | |
1967 | s = sk_head(&hash->table[j]); | |
1968 | while (s && sock_net(s) != seq_file_net(seq)) | |
1969 | s = sk_next(s); | |
1970 | if (s) { | |
1971 | iter->link = i; | |
1972 | iter->hash_idx = j; | |
1973 | return s; | |
1974 | } | |
1975 | } | |
1976 | ||
1977 | j = 0; | |
1978 | } while (++i < MAX_LINKS); | |
1979 | ||
1980 | return NULL; | |
1981 | } | |
1982 | ||
1983 | static void netlink_seq_stop(struct seq_file *seq, void *v) | |
1984 | __releases(nl_table_lock) | |
1985 | { | |
1986 | read_unlock(&nl_table_lock); | |
1987 | } | |
1988 | ||
1989 | ||
1990 | static int netlink_seq_show(struct seq_file *seq, void *v) | |
1991 | { | |
1992 | if (v == SEQ_START_TOKEN) | |
1993 | seq_puts(seq, | |
1994 | "sk Eth Pid Groups " | |
1995 | "Rmem Wmem Dump Locks Drops Inode\n"); | |
1996 | else { | |
1997 | struct sock *s = v; | |
1998 | struct netlink_sock *nlk = nlk_sk(s); | |
1999 | ||
2000 | seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n", | |
2001 | s, | |
2002 | s->sk_protocol, | |
2003 | nlk->pid, | |
2004 | nlk->groups ? (u32)nlk->groups[0] : 0, | |
2005 | sk_rmem_alloc_get(s), | |
2006 | sk_wmem_alloc_get(s), | |
2007 | nlk->cb, | |
2008 | atomic_read(&s->sk_refcnt), | |
2009 | atomic_read(&s->sk_drops), | |
2010 | sock_i_ino(s) | |
2011 | ); | |
2012 | ||
2013 | } | |
2014 | return 0; | |
2015 | } | |
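/*
 * Note (added for clarity): each /proc/net/netlink line above shows, per
 * socket and in the order of the header: socket address, protocol number
 * (Eth), bound pid, low 32 bits of the multicast group bitmap, receive and
 * send queue bytes (Rmem/Wmem), the active dump callback pointer, reference
 * count (Locks), dropped-packet count and inode number.
 */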
2016 | ||
2017 | static const struct seq_operations netlink_seq_ops = { | |
2018 | .start = netlink_seq_start, | |
2019 | .next = netlink_seq_next, | |
2020 | .stop = netlink_seq_stop, | |
2021 | .show = netlink_seq_show, | |
2022 | }; | |
2023 | ||
2024 | ||
2025 | static int netlink_seq_open(struct inode *inode, struct file *file) | |
2026 | { | |
2027 | return seq_open_net(inode, file, &netlink_seq_ops, | |
2028 | sizeof(struct nl_seq_iter)); | |
2029 | } | |
2030 | ||
2031 | static const struct file_operations netlink_seq_fops = { | |
2032 | .owner = THIS_MODULE, | |
2033 | .open = netlink_seq_open, | |
2034 | .read = seq_read, | |
2035 | .llseek = seq_lseek, | |
2036 | .release = seq_release_net, | |
2037 | }; | |
2038 | ||
2039 | #endif | |
2040 | ||
2041 | int netlink_register_notifier(struct notifier_block *nb) | |
2042 | { | |
2043 | return atomic_notifier_chain_register(&netlink_chain, nb); | |
2044 | } | |
2045 | EXPORT_SYMBOL(netlink_register_notifier); | |
2046 | ||
2047 | int netlink_unregister_notifier(struct notifier_block *nb) | |
2048 | { | |
2049 | return atomic_notifier_chain_unregister(&netlink_chain, nb); | |
2050 | } | |
2051 | EXPORT_SYMBOL(netlink_unregister_notifier); | |
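/*
 * Example (illustrative sketch, not part of the original file): registering
 * a notifier on this chain.  The event raised on it elsewhere in this file,
 * when a bound socket is released, is NETLINK_URELEASE, delivered with a
 * struct netlink_notify from <linux/netlink.h>.  MY_PROTO and the my_*
 * names are hypothetical.
 */
static int my_netlink_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == MY_PROTO)
		pr_debug("netlink socket bound to pid %u released\n", n->pid);

	return NOTIFY_DONE;
}

static struct notifier_block my_netlink_notifier = {
	.notifier_call	= my_netlink_event,
};

/* in module init/exit: netlink_register_notifier(&my_netlink_notifier) and
 * netlink_unregister_notifier(&my_netlink_notifier) */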
2052 | ||
2053 | static const struct proto_ops netlink_ops = { | |
2054 | .family = PF_NETLINK, | |
2055 | .owner = THIS_MODULE, | |
2056 | .release = netlink_release, | |
2057 | .bind = netlink_bind, | |
2058 | .connect = netlink_connect, | |
2059 | .socketpair = sock_no_socketpair, | |
2060 | .accept = sock_no_accept, | |
2061 | .getname = netlink_getname, | |
2062 | .poll = datagram_poll, | |
2063 | .ioctl = sock_no_ioctl, | |
2064 | .listen = sock_no_listen, | |
2065 | .shutdown = sock_no_shutdown, | |
2066 | .setsockopt = netlink_setsockopt, | |
2067 | .getsockopt = netlink_getsockopt, | |
2068 | .sendmsg = netlink_sendmsg, | |
2069 | .recvmsg = netlink_recvmsg, | |
2070 | .mmap = sock_no_mmap, | |
2071 | .sendpage = sock_no_sendpage, | |
2072 | }; | |
2073 | ||
2074 | static const struct net_proto_family netlink_family_ops = { | |
2075 | .family = PF_NETLINK, | |
2076 | .create = netlink_create, | |
2077 | .owner = THIS_MODULE, /* for consistency 8) */ | |
2078 | }; | |
2079 | ||
2080 | static int __net_init netlink_net_init(struct net *net) | |
2081 | { | |
2082 | #ifdef CONFIG_PROC_FS | |
2083 | if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops)) | |
2084 | return -ENOMEM; | |
2085 | #endif | |
2086 | return 0; | |
2087 | } | |
2088 | ||
2089 | static void __net_exit netlink_net_exit(struct net *net) | |
2090 | { | |
2091 | #ifdef CONFIG_PROC_FS | |
2092 | proc_net_remove(net, "netlink"); | |
2093 | #endif | |
2094 | } | |
2095 | ||
2096 | static struct pernet_operations __net_initdata netlink_net_ops = { | |
2097 | .init = netlink_net_init, | |
2098 | .exit = netlink_net_exit, | |
2099 | }; | |
2100 | ||
2101 | static int __init netlink_proto_init(void) | |
2102 | { | |
2103 | struct sk_buff *dummy_skb; | |
2104 | int i; | |
2105 | unsigned long limit; | |
2106 | unsigned int order; | |
2107 | int err = proto_register(&netlink_proto, 0); | |
2108 | ||
2109 | if (err != 0) | |
2110 | goto out; | |
2111 | ||
2112 | BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)); | |
2113 | ||
2114 | nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); | |
2115 | if (!nl_table) | |
2116 | goto panic; | |
2117 | ||
2118 | if (totalram_pages >= (128 * 1024)) | |
2119 | limit = totalram_pages >> (21 - PAGE_SHIFT); | |
2120 | else | |
2121 | limit = totalram_pages >> (23 - PAGE_SHIFT); | |
2122 | ||
2123 | order = get_bitmask_order(limit) - 1 + PAGE_SHIFT; | |
2124 | limit = (1UL << order) / sizeof(struct hlist_head); | |
2125 | order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1; | |
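	/*
	 * Worked example (added for clarity; assumes 4 KiB pages, PAGE_SHIFT
	 * == 12, and a 64-bit build where sizeof(struct hlist_head) == 8):
	 * with 1 GiB of RAM, totalram_pages == 262144, so
	 * limit = 262144 >> 9 = 512 (memory in 2 MiB units).  Then
	 * order = fls(512) - 1 + 12 = 21, i.e. 2 MiB worth of buckets;
	 * limit = 2 MiB / 8 = 262144 and the final
	 * order = fls(262144) - 1 = 18, so each protocol's pid hash below
	 * starts with a single bucket (mask == 0) and may be rehashed on
	 * demand up to 2^18 buckets (max_shift).
	 */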
2126 | ||
2127 | for (i = 0; i < MAX_LINKS; i++) { | |
2128 | struct nl_pid_hash *hash = &nl_table[i].hash; | |
2129 | ||
2130 | hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table)); | |
2131 | if (!hash->table) { | |
2132 | while (i-- > 0) | |
2133 | nl_pid_hash_free(nl_table[i].hash.table, | |
2134 | 1 * sizeof(*hash->table)); | |
2135 | kfree(nl_table); | |
2136 | goto panic; | |
2137 | } | |
2138 | hash->max_shift = order; | |
2139 | hash->shift = 0; | |
2140 | hash->mask = 0; | |
2141 | hash->rehash_time = jiffies; | |
2142 | } | |
2143 | ||
2144 | sock_register(&netlink_family_ops); | |
2145 | register_pernet_subsys(&netlink_net_ops); | |
2146 | /* The netlink device handler may be needed early. */ | |
2147 | rtnetlink_init(); | |
2148 | out: | |
2149 | return err; | |
2150 | panic: | |
2151 | panic("netlink_init: Cannot allocate nl_table\n"); | |
2152 | } | |
2153 | ||
2154 | core_initcall(netlink_proto_init); |