/*
 * NETLINK	Kernel-user communication protocol.
 *
 *		Authors:	Alan Cox <alan@redhat.com>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *				added netlink_proto_exit
 *	Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *				use nlk_sk, as sk->protinfo is on a diet 8)
 *
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>

#include <net/sock.h>
#include <net/scm.h>

#define Nprintk(a...)

struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	unsigned int		groups;
	u32			dst_pid;
	unsigned int		dst_groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	spinlock_t		cb_lock;
	void			(*data_ready)(struct sock *sk, int bytes);
};

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return (struct netlink_sock *)sk;
}
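
/*
 * Note on the cast in nlk_sk(): struct sock is the first member of
 * struct netlink_sock, so the two share an address and the cast is
 * equivalent to container_of(sk, struct netlink_sock, sk).  A small
 * illustrative check (not part of the original code):
 *
 *	struct netlink_sock *nlk = nlk_sk(sk);
 *	BUG_ON(nlk != container_of(sk, struct netlink_sock, sk));
 */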

struct nl_pid_hash {
	struct hlist_head *table;
	unsigned long rehash_time;

	unsigned int mask;
	unsigned int shift;

	unsigned int entries;
	unsigned int max_shift;

	u32 rnd;
};

struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
	unsigned int nl_nonroot;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->cb);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP, but it is _very_ bad
 * on SMP. Look: when several writers sleep and a reader wakes them up, all
 * but one immediately hit the write lock and grab all the cpus. Exclusive
 * sleep solves this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
	write_lock_bh(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_bh(&nl_table_lock);
			schedule();
			write_lock_bh(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_bh(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us with netlink_table_grab() */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
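
/*
 * A minimal usage sketch of the two-level table locking above (illustrative
 * only; "my_walk_mc_list" is a hypothetical helper, not part of this file).
 * Writers use netlink_table_grab()/netlink_table_ungrab() and wait for all
 * counted readers to drain; readers only bump nl_table_users under a brief
 * read_lock() and may then sleep, e.g. in skb_clone():
 *
 *	netlink_lock_table();
 *	my_walk_mc_list(&nl_table[protocol].mc_list);	// may sleep
 *	netlink_unlock_table();
 */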

static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}

static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
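
/*
 * Worked example for the dilution policy above (illustrative numbers):
 * with shift == 4 the table has 16 buckets, so once entries reaches 32,
 * avg = 32 >> 4 = 2 > 1 and the next colliding insert grows the table to
 * 32 buckets, rehashing every socket with a fresh hash->rnd.  Once growth
 * is capped by max_shift, a chain longer than the average (len > avg)
 * still forces a same-size rehash, but at most once per 10 minutes
 * (rehash_time), breaking up pathological chains without thrashing.
 */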

static struct proto_ops netlink_ops;

static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->groups)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);

	spin_lock_init(&nlk->cb_lock);
	init_waitqueue_head(&nlk->wait);
	sk->sk_destruct = netlink_sock_destruct;

	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	nlk = nlk_sk(sk);

	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
		nlk->cb = NULL;
	}
	spin_unlock(&nlk->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->groups) {
		struct netlink_notify n = {
			.protocol = sk->sk_protocol,
			.pid = nlk->pid,
		};
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}

	sock_put(sk);
	return 0;
}

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->pid;
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine.  */
	if (err == -EBUSY)
		err = 0;

	return err;
}
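
/*
 * A hedged userspace sketch of what autobind means in practice: binding
 * with nl_pid == 0 (or sending on an unbound socket) lets the kernel pick
 * the id via netlink_autobind(), and getsockname() reveals the result.
 * Illustrative only; error handling elided.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };	// nl_pid = 0
 *	socklen_t salen = sizeof(sa);
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	getsockname(fd, (struct sockaddr *)&sa, &salen);
 *	// sa.nl_pid now holds the kernel-assigned id (usually the caller's
 *	// process id, or a negative value after a collision)
 */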

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
		return -EPERM;

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && !nlk->groups)
		return 0;

	netlink_table_grab();
	if (nlk->groups && !nladdr->nl_groups)
		__sk_del_bind_node(sk);
	else if (!nlk->groups && nladdr->nl_groups)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->groups = nladdr->nl_groups;
	netlink_table_ungrab();

	return 0;
}
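
/*
 * A hedged userspace sketch of the bind() path above: nl_groups is a 32-bit
 * subscription bitmask in this version of the code, so a listener joins
 * rtnetlink link notifications like this (illustrative only, error handling
 * elided):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = getpid(),
 *		.nl_groups = RTMGRP_LINK,	// multicast bitmask
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	// non-root callers get -EPERM unless NL_NONROOT_RECV is set
 *	// for the protocol via netlink_set_nonroot()
 */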

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state = NETLINK_UNCONNECTED;
		nlk->dst_pid = 0;
		nlk->dst_groups = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state = NETLINK_CONNECTED;
		nlk->dst_pid = nladdr->nl_pid;
		nlk->dst_groups = nladdr->nl_groups;
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = nlk->dst_groups;
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups;
	}
	return 0;
}

static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

/*
 * Attach an skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!nlk->pid)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	struct netlink_sock *nlk;
	int len = skb->len;

	nlk = nlk_sk(sk);

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static inline struct sk_buff *netlink_trim(struct sk_buff *skb, int allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
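
/*
 * A hedged kernel-side sketch of netlink_unicast(): replying to a request
 * skb from an input callback, addressed by the sender's NETLINK_CB pid.
 * "my_sk", "my_build_reply" and "size" are hypothetical placeholders;
 * NETLINK_CB(), alloc_skb() and the MSG_DONTWAIT nonblock convention are
 * the real interfaces, as used by netlink_ack() below.
 *
 *	struct sk_buff *reply = alloc_skb(NLMSG_SPACE(size), GFP_KERNEL);
 *	if (reply) {
 *		my_build_reply(reply);
 *		netlink_unicast(my_sk, reply, NETLINK_CB(skb).pid,
 *				MSG_DONTWAIT);
 *	}
 */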

static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	int allocation;
	struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & p->group))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, int allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
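
/*
 * A hedged kernel-side sketch of netlink_broadcast(): fanning a notification
 * out to every socket subscribed to a multicast group.  "my_sk",
 * "my_fill_event" and "size" are hypothetical; the group argument is the
 * same bitmask userspace passes in sockaddr_nl.nl_groups, and -ESRCH simply
 * means nobody was listening.
 *
 *	struct sk_buff *ev = alloc_skb(NLMSG_SPACE(size), GFP_KERNEL);
 *	if (ev) {
 *		my_fill_event(ev);
 *		netlink_broadcast(my_sk, ev, 0, RTMGRP_LINK, GFP_KERNEL);
 *	}
 */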

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & p->group))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}

static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!skb_queue_len(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_groups;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_groups = addr->nl_groups;
		if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_groups = nlk->dst_groups;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid = nlk->pid;
	NETLINK_CB(skb).groups = nlk->groups;
	NETLINK_CB(skb).dst_pid = dst_pid;
	NETLINK_CB(skb).dst_groups = dst_groups;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so we have to save the
	   current capabilities and check them when this message is delivered
	   to the corresponding kernel module. --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_groups) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
	return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb->h.raw = skb->data;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).pid;
		addr->nl_groups = NETLINK_CB(skb).dst_groups;
		msg->msg_namelen = sizeof(*addr);
	}

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);

out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
{
	struct socket *sock;
	struct sock *sk;

	if (!nl_table)
		return NULL;

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (netlink_create(sock, unit) < 0) {
		sock_release(sock);
		return NULL;
	}
	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0)) {
		sock_release(sock);
		return NULL;
	}
	return sk;
}
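
/*
 * A hedged usage sketch for netlink_kernel_create(): a module registers a
 * kernel-side socket (pid 0) with an input callback that drains the receive
 * queue.  "NETLINK_MYPROTO" and "my_input" are hypothetical placeholders;
 * the skb_dequeue() loop mirrors how contemporary users consume their
 * queues.
 *
 *	static void my_input(struct sock *sk, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 *			// parse the nlmsghdr at skb->data, reply or ack here
 *			kfree_skb(skb);
 *		}
 *	}
 *
 *	static struct sock *my_sk;
 *	// in module init:
 *	my_sk = netlink_kernel_create(NETLINK_MYPROTO, my_input);
 */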

void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	spin_lock(&nlk->cb_lock);

	cb = nlk->cb;
	if (cb == NULL) {
		spin_unlock(&nlk->cb_lock);
		kfree_skb(skb);
		return -EINVAL;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		spin_unlock(&nlk->cb_lock);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = NLMSG_NEW_ANSWER(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	cb->done(cb);
	nlk->cb = NULL;
	spin_unlock(&nlk->cb_lock);

	netlink_destroy_callback(cb);
	return 0;

nlmsg_failure:
	return -ENOBUFS;
}

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	memset(cb, 0, sizeof(*cb));
	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		spin_unlock(&nlk->cb_lock);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	spin_unlock(&nlk->cb_lock);

	netlink_dump(sk);
	sock_put(sk);
	return 0;
}
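
/*
 * A hedged sketch of the dump protocol above, as a handler for a request
 * carrying NLM_F_DUMP would use it.  "my_dump", "my_done" and
 * "my_fill_records" are hypothetical: the dump callback fills skb with as
 * many records as fit and returns skb->len, or returns <= 0 when finished,
 * after which netlink_dump() appends NLMSG_DONE.  Further chunks are
 * produced from netlink_recvmsg() as the reader drains its receive buffer.
 *
 *	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		// use cb->args[] to remember the resume position
 *		return my_fill_records(skb, cb) ? skb->len : 0;
 *	}
 *
 *	// in the request handler:
 *	err = netlink_dump_start(my_sk, skb, nlh, my_dump, my_done);
 */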

void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	int size;

	if (err == 0)
		size = NLMSG_SPACE(sizeof(struct nlmsgerr));
	else
		size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = NLMSG_DATA(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
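
/*
 * A hedged sketch of how a message handler uses netlink_ack(): echo an
 * NLMSG_ERROR message back to the sender, with error == 0 serving as the
 * positive acknowledgement requested via NLM_F_ACK.  "my_handle" is a
 * hypothetical per-protocol handler.
 *
 *	struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
 *	int err = my_handle(skb, nlh);
 *	if (err || (nlh->nlmsg_flags & NLM_F_ACK))
 *		netlink_ack(skb, nlh, err);
 */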

#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}


static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);

	}
	return 0;
}

static struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};


static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	memset(iter, 0, sizeof(*iter));
	seq = file->private_data;
	seq->private = iter;
	return 0;
}

static struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netlink_chain, nb);
}

static struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};

extern void netlink_skb_parms_too_large(void);

static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
		netlink_skb_parms_too_large();

	nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
	if (!nl_table) {
enomem:
		printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
		return -ENOMEM;
	}

	memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto enomem;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
}

static void __exit netlink_proto_exit(void)
{
	sock_unregister(PF_NETLINK);
	proc_net_remove("netlink");
	kfree(nl_table);
	nl_table = NULL;
	proto_unregister(&netlink_proto);
}

core_initcall(netlink_proto_init);
module_exit(netlink_proto_exit);

MODULE_LICENSE("GPL");

MODULE_ALIAS_NETPROTO(PF_NETLINK);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);