/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

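/*
 * set_close_on_exec() sets or clears an fd's bit in the fdtable's
 * close_on_exec bitmap under files->file_lock; execve() later closes
 * every descriptor whose bit is set.  Userspace toggles the same bit
 * through fcntl() - a minimal sketch, illustrative only:
 *
 *	int flags = fcntl(fd, F_GETFD);
 *	fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
 */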
void fastcall set_close_on_exec(unsigned int fd, int flag)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (flag)
                FD_SET(fd, fdt->close_on_exec);
        else
                FD_CLR(fd, fdt->close_on_exec);
        spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        int res;
        rcu_read_lock();
        fdt = files_fdtable(files);
        res = FD_ISSET(fd, fdt->close_on_exec);
        rcu_read_unlock();
        return res;
}

/*
 * locate_fd finds a free file descriptor in the open_fds fdset,
 * expanding the fd arrays if necessary.  Must be called with the
 * file_lock held for write.
 */

static int locate_fd(struct files_struct *files,
                     struct file *file, unsigned int orig_start)
{
        unsigned int newfd;
        unsigned int start;
        int error;
        struct fdtable *fdt;

        error = -EINVAL;
        if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out;

repeat:
        fdt = files_fdtable(files);
        /*
         * Someone might have closed fd's in the range
         * orig_start..files->next_fd
         */
        start = orig_start;
        if (start < files->next_fd)
                start = files->next_fd;

        newfd = start;
        if (start < fdt->max_fdset) {
                newfd = find_next_zero_bit(fdt->open_fds->fds_bits,
                                           fdt->max_fdset, start);
        }

        error = -EMFILE;
        if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out;

        error = expand_files(files, newfd);
        if (error < 0)
                goto out;

        /*
         * If we needed to expand the fd array we
         * might have blocked - try again.
         */
        if (error)
                goto repeat;

        /*
         * We reacquired files->file_lock, so we are safe as long as
         * we reacquire the fdtable pointer and use it while holding
         * the lock; no one can free it during that time.
         */
        if (start <= files->next_fd)
                files->next_fd = newfd + 1;

        error = newfd;

out:
        return error;
}

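/*
 * dupfd() installs an already-acquired struct file at the lowest free
 * descriptor >= start.  Note the reference-count contract: the caller
 * passes in a reference to file, which dupfd() either hands to the new
 * descriptor slot via fd_install() or drops with fput() on failure, so
 * the caller must not fput() the file afterwards.
 */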
static int dupfd(struct file *file, unsigned int start)
{
        struct files_struct * files = current->files;
        struct fdtable *fdt;
        int fd;

        spin_lock(&files->file_lock);
        fd = locate_fd(files, file, start);
        if (fd >= 0) {
                /* locate_fd() may have expanded fdtable, load the ptr */
                fdt = files_fdtable(files);
                FD_SET(fd, fdt->open_fds);
                FD_CLR(fd, fdt->close_on_exec);
                spin_unlock(&files->file_lock);
                fd_install(fd, file);
        } else {
                spin_unlock(&files->file_lock);
                fput(file);
        }

        return fd;
}

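/*
 * sys_dup2() makes newfd refer to the same struct file as oldfd,
 * closing whatever newfd previously referred to.  Unlike dup(), the
 * caller chooses the target descriptor; the classic userspace use is
 * redirecting stdout before an exec - an illustrative sketch only:
 *
 *	int fd = open("log.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	dup2(fd, STDOUT_FILENO);
 *	close(fd);
 */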
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
        int err = -EBADF;
        struct file * file, *tofree;
        struct files_struct * files = current->files;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
        if (!(file = fcheck(oldfd)))
                goto out_unlock;
        err = newfd;
        if (newfd == oldfd)
                goto out_unlock;
        err = -EBADF;
        if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                goto out_unlock;
        get_file(file);                 /* We are now finished with oldfd */

        err = expand_files(files, newfd);
        if (err < 0)
                goto out_fput;

        /* To avoid races with open() and dup(), we will mark the fd as
         * in-use in the open-file bitmap throughout the entire dup2()
         * process.  This is quite safe: do_close() uses the fd array
         * entry, not the bitmap, to decide what work needs to be
         * done. --sct */
        /* Doesn't work. open() might be there first. --AV */

        /* Yes. It's a race. In user space. Nothing sane to do */
        err = -EBUSY;
        fdt = files_fdtable(files);
        tofree = fdt->fd[newfd];
        if (!tofree && FD_ISSET(newfd, fdt->open_fds))
                goto out_fput;

        rcu_assign_pointer(fdt->fd[newfd], file);
        FD_SET(newfd, fdt->open_fds);
        FD_CLR(newfd, fdt->close_on_exec);
        spin_unlock(&files->file_lock);

        if (tofree)
                filp_close(tofree, files);
        err = newfd;
out:
        return err;
out_unlock:
        spin_unlock(&files->file_lock);
        goto out;

out_fput:
        spin_unlock(&files->file_lock);
        fput(file);
        goto out;
}

asmlinkage long sys_dup(unsigned int fildes)
{
        int ret = -EBADF;
        struct file * file = fget(fildes);

        if (file)
                ret = dupfd(file, 0);
        return ret;
}

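/*
 * SETFL_MASK lists the only file status flags F_SETFL may change; the
 * access mode and creation-time flags in arg are preserved from the
 * old f_flags.  The usual userspace pattern, illustrative only:
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */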
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
        struct inode * inode = filp->f_dentry->d_inode;
        int error = 0;

        /*
         * O_APPEND cannot be cleared if the file is marked as append-only
         * and the file is open for write.
         */
        if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
                return -EPERM;

        /* O_NOATIME can only be set by the owner or superuser */
        if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
                if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
                        return -EPERM;

        /* required for strict SunOS emulation */
        if (O_NONBLOCK != O_NDELAY)
                if (arg & O_NDELAY)
                        arg |= O_NONBLOCK;

        if (arg & O_DIRECT) {
                if (!filp->f_mapping || !filp->f_mapping->a_ops ||
                    !filp->f_mapping->a_ops->direct_IO)
                        return -EINVAL;
        }

        if (filp->f_op && filp->f_op->check_flags)
                error = filp->f_op->check_flags(arg);
        if (error)
                return error;

        lock_kernel();
        if ((arg ^ filp->f_flags) & FASYNC) {
                if (filp->f_op && filp->f_op->fasync) {
                        error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
                        if (error < 0)
                                goto out;
                }
        }

        filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
out:
        unlock_kernel();
        return error;
}

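/*
 * f_modown() updates the file's owner - the target of SIGIO/SIGURG
 * delivery - under the fown write lock.  With force == 0 an existing
 * owner is left untouched, so callers can claim ownership only if it
 * is still unset; f_delown() passes force == 1 to clear it.
 */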
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
                     uid_t uid, uid_t euid, int force)
{
        write_lock_irq(&filp->f_owner.lock);
        if (force || !filp->f_owner.pid) {
                put_pid(filp->f_owner.pid);
                filp->f_owner.pid = get_pid(pid);
                filp->f_owner.pid_type = type;
                filp->f_owner.uid = uid;
                filp->f_owner.euid = euid;
        }
        write_unlock_irq(&filp->f_owner.lock);
}

int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
               int force)
{
        int err;

        err = security_file_set_fowner(filp);
        if (err)
                return err;

        f_modown(filp, pid, type, current->uid, current->euid, force);
        return 0;
}
EXPORT_SYMBOL(__f_setown);

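/*
 * f_setown() decodes the classic F_SETOWN argument convention: a
 * positive value names a process, a negative value names a process
 * group.  A userspace program arms async I/O notification to itself
 * with something like this (illustrative sketch only):
 *
 *	fcntl(sock, F_SETOWN, getpid());
 *	fcntl(sock, F_SETFL, fcntl(sock, F_GETFL) | O_ASYNC);
 */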
int f_setown(struct file *filp, unsigned long arg, int force)
{
        enum pid_type type;
        struct pid *pid;
        int who = arg;
        int result;
        type = PIDTYPE_PID;
        if (who < 0) {
                type = PIDTYPE_PGID;
                who = -who;
        }
        rcu_read_lock();
        pid = find_pid(who);
        result = __f_setown(filp, pid, type, force);
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
        f_modown(filp, NULL, PIDTYPE_PID, 0, 0, 1);
}

pid_t f_getown(struct file *filp)
{
        pid_t pid;
        read_lock(&filp->f_owner.lock);
        pid = pid_nr(filp->f_owner.pid);
        if (filp->f_owner.pid_type == PIDTYPE_PGID)
                pid = -pid;
        read_unlock(&filp->f_owner.lock);
        return pid;
}

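/*
 * do_fcntl() is the command dispatcher shared by sys_fcntl() and
 * sys_fcntl64(); it runs after the security hook has approved the
 * call, with a reference to filp held by the caller.
 */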
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
                     struct file *filp)
{
        long err = -EINVAL;

        switch (cmd) {
        case F_DUPFD:
                get_file(filp);
                err = dupfd(filp, arg);
                break;
        case F_GETFD:
                err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
                break;
        case F_SETFD:
                err = 0;
                set_close_on_exec(fd, arg & FD_CLOEXEC);
                break;
        case F_GETFL:
                err = filp->f_flags;
                break;
        case F_SETFL:
                err = setfl(fd, filp, arg);
                break;
        case F_GETLK:
                err = fcntl_getlk(filp, (struct flock __user *) arg);
                break;
        case F_SETLK:
        case F_SETLKW:
                err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
                break;
        case F_GETOWN:
                /*
                 * XXX If f_owner is a process group, the
                 * negative return value will get converted
                 * into an error.  Oops.  If we keep the
                 * current syscall conventions, the only way
                 * to fix this will be in libc.
                 */
                err = f_getown(filp);
                force_successful_syscall_return();
                break;
        case F_SETOWN:
                err = f_setown(filp, arg, 1);
                break;
        case F_GETSIG:
                err = filp->f_owner.signum;
                break;
        case F_SETSIG:
                /* arg == 0 restores default behaviour. */
                if (!valid_signal(arg)) {
                        break;
                }
                err = 0;
                filp->f_owner.signum = arg;
                break;
        case F_GETLEASE:
                err = fcntl_getlease(filp);
                break;
        case F_SETLEASE:
                err = fcntl_setlease(fd, filp, arg);
                break;
        case F_NOTIFY:
                err = fcntl_dirnotify(fd, filp, arg);
                break;
        default:
                break;
        }
        return err;
}

asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        struct file *filp;
        long err = -EBADF;

        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }

        err = do_fcntl(fd, cmd, arg, filp);

        fput(filp);
out:
        return err;
}

#if BITS_PER_LONG == 32
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        struct file * filp;
        long err;

        err = -EBADF;
        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }
        err = -EBADF;

        switch (cmd) {
        case F_GETLK64:
                err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
                break;
        case F_SETLK64:
        case F_SETLKW64:
                err = fcntl_setlk64(fd, filp, cmd,
                                    (struct flock64 __user *) arg);
                break;
        default:
                err = do_fcntl(fd, cmd, arg, filp);
                break;
        }
        fput(filp);
out:
        return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
        POLLIN | POLLRDNORM,                    /* POLL_IN */
        POLLOUT | POLLWRNORM | POLLWRBAND,      /* POLL_OUT */
        POLLIN | POLLRDNORM | POLLMSG,          /* POLL_MSG */
        POLLERR,                                /* POLL_ERR */
        POLLPRI | POLLRDBAND,                   /* POLL_PRI */
        POLLHUP | POLLERR                       /* POLL_HUP */
};

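/*
 * sigio_perm() mirrors the kill() permission rule: the owner-setter
 * (whose uid/euid were recorded in fown at F_SETOWN time) may signal
 * the target task if it was root or one of its ids matches the
 * target's uid or suid, and the security module agrees.
 */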
static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown, int sig)
{
        return (((fown->euid == 0) ||
                 (fown->euid == p->suid) || (fown->euid == p->uid) ||
                 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
                !security_file_send_sigiotask(p, fown, sig));
}

static void send_sigio_to_task(struct task_struct *p,
                               struct fown_struct *fown,
                               int fd,
                               int reason)
{
        if (!sigio_perm(p, fown, fown->signum))
                return;

        switch (fown->signum) {
                siginfo_t si;
        default:
                /* Queue a rt signal with the appropriate fd as its
                   value.  We use SI_SIGIO as the source, not
                   SI_KERNEL, since kernel signals always get
                   delivered even if we can't queue.  Failure to
                   queue in this case _should_ be reported; we fall
                   back to SIGIO in that case. --sct */
                si.si_signo = fown->signum;
                si.si_errno = 0;
                si.si_code  = reason;
                /* Make sure we are called with one of the POLL_*
                   reasons, otherwise we could leak kernel stack into
                   userspace.  */
                BUG_ON((reason & __SI_MASK) != __SI_POLL);
                if (reason - POLL_IN >= NSIGPOLL)
                        si.si_band = ~0L;
                else
                        si.si_band = band_table[reason - POLL_IN];
                si.si_fd = fd;
                if (!group_send_sig_info(fown->signum, &si, p))
                        break;
                /* fall-through: fall back on the old plain SIGIO signal */
        case 0:
                group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
        }
}

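/*
 * send_sigio() fans the notification out to every task of the owning
 * pid or process group recorded in fown; send_sigio_to_task() above
 * does the per-task permission check and the POLL_* to si_band
 * translation via band_table.
 */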
void send_sigio(struct fown_struct *fown, int fd, int band)
{
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;

        read_lock(&fown->lock);
        type = fown->pid_type;
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        read_lock(&tasklist_lock);
        do_each_pid_task(pid, type, p) {
                send_sigio_to_task(p, fown, fd, band);
        } while_each_pid_task(pid, type, p);
        read_unlock(&tasklist_lock);
out_unlock_fown:
        read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
                                struct fown_struct *fown)
{
        if (sigio_perm(p, fown, SIGURG))
                group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;
        int ret = 0;

        read_lock(&fown->lock);
        type = fown->pid_type;
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        ret = 1;

        read_lock(&tasklist_lock);
        do_each_pid_task(pid, type, p) {
                send_sigurg_to_task(p, fown);
        } while_each_pid_task(pid, type, p);
        read_unlock(&tasklist_lock);
out_unlock_fown:
        read_unlock(&fown->lock);
        return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static kmem_cache_t *fasync_cache __read_mostly;

/*
 * fasync_helper() is used by some character device drivers (mainly mice)
 * to set up the fasync queue.  It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
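/*
 * A driver typically wires this up from its fasync file operation and
 * later notifies waiters with kill_fasync().  A minimal sketch,
 * assuming a hypothetical per-device "struct foo_dev" that embeds an
 * async_queue list head (names are illustrative, not a real driver):
 *
 *	static int foo_fasync(int fd, struct file *filp, int on)
 *	{
 *		struct foo_dev *dev = filp->private_data;
 *		return fasync_helper(fd, filp, on, &dev->async_queue);
 *	}
 *
 * and, when new data arrives:
 *
 *	kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
 */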
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
        struct fasync_struct *fa, **fp;
        struct fasync_struct *new = NULL;
        int result = 0;

        if (on) {
                new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
                if (!new)
                        return -ENOMEM;
        }
        write_lock_irq(&fasync_lock);
        for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
                if (fa->fa_file == filp) {
                        if (on) {
                                fa->fa_fd = fd;
                                kmem_cache_free(fasync_cache, new);
                        } else {
                                *fp = fa->fa_next;
                                kmem_cache_free(fasync_cache, fa);
                                result = 1;
                        }
                        goto out;
                }
        }

        if (on) {
                new->magic = FASYNC_MAGIC;
                new->fa_file = filp;
                new->fa_fd = fd;
                new->fa_next = *fapp;
                *fapp = new;
                result = 1;
        }
out:
        write_unlock_irq(&fasync_lock);
        return result;
}

EXPORT_SYMBOL(fasync_helper);

void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
        while (fa) {
                struct fown_struct * fown;
                if (fa->magic != FASYNC_MAGIC) {
                        printk(KERN_ERR "kill_fasync: bad magic number in "
                               "fasync_struct!\n");
                        return;
                }
                fown = &fa->fa_file->f_owner;
                /* Don't send SIGURG to processes which have not set a
                   queued signum: SIGURG has its own default signalling
                   mechanism. */
                if (!(sig == SIGURG && fown->signum == 0))
                        send_sigio(fown, fa->fa_fd, band);
                fa = fa->fa_next;
        }
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
        /* First a quick test without locking: usually
         * the list is empty.
         */
        if (*fp) {
                read_lock(&fasync_lock);
                /* reread *fp after obtaining the lock */
                __kill_fasync(*fp, sig, band);
                read_unlock(&fasync_lock);
        }
}
EXPORT_SYMBOL(kill_fasync);

static int __init fasync_init(void)
{
        fasync_cache = kmem_cache_create("fasync_cache",
                sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
        return 0;
}

module_init(fasync_init)