/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/fsnotify.h>

#include "kernfs-internal.h"

/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file.  kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);

struct kernfs_open_node {
	atomic_t		refcnt;
	atomic_t		event;
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
};

/*
 * kernfs_notify() may be called from any context and bounces notifications
 * through a work item.  To minimize space overhead in kernfs_node, the
 * pending queue is implemented as a singly linked list of kernfs_nodes.
 * The list is terminated with the self pointer so that whether a
 * kernfs_node is on the list or not can be determined by testing the next
 * pointer for NULL.
 */
#define KERNFS_NOTIFY_EOL			((void *)&kernfs_notify_list)

static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;

static struct kernfs_open_file *kernfs_of(struct file *file)
{
	return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		lockdep_assert_held(kn);
	return kn->attr.ops;
}

/*
 * As kernfs_seq_stop() is also called after kernfs_seq_start() or
 * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
 * a seq_file iteration which is fully initialized with an active reference
 * or an aborted kernfs_seq_start() due to get_active failure.  The
 * position pointer is the only context for each seq_file iteration and
 * thus the stop condition should be encoded in it.  As the return value is
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * choice to indicate get_active failure.
 *
 * Unfortunately, this is complicated due to the optional custom seq_file
 * operations which may return ERR_PTR(-ENODEV) too.  kernfs_seq_stop()
 * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
 * custom seq_file operations and thus can't decide whether put_active
 * should be performed or not only on ERR_PTR(-ENODEV).
 *
 * This is worked around by factoring out the custom seq_stop() and
 * put_active part into kernfs_seq_stop_active(), skipping it from
 * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
 * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
 * that kernfs_seq_stop_active() is skipped only after get_active failure.
 */
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_stop)
		ops->seq_stop(sf, v);
	kernfs_put_active(of->kn);
}

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		void *next = ops->seq_start(sf, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open().  Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		void *next = ops->seq_next(sf, v, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open(), always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	if (v != ERR_PTR(-ENODEV))
		kernfs_seq_stop_active(sf, v);
	mutex_unlock(&of->mutex);
}

static int kernfs_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	of->event = atomic_read(&of->kn->attr.open->event);

	return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};

/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file.  Implement simplistic custom buffering for
 * bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
				       char __user *user_buf, size_t count,
				       loff_t *ppos)
{
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is used to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	of->event = atomic_read(&of->kn->attr.open->event);
	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len < 0)
		goto out_free;

	if (copy_to_user(user_buf, buf, len)) {
		len = -EFAULT;
		goto out_free;
	}

	*ppos += len;

 out_free:
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}

/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: buffer to read the data into
 * @count: number of bytes
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);

	if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
		return seq_read(file, user_buf, count, ppos);
	else
		return kernfs_file_direct_read(of, user_buf, count, ppos);
}

/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them.  We expect the entire buffer to come on
 * the first write.  Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
 * back.
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	size_t len;
	char *buf;

	if (of->atomic_write_len) {
		len = count;
		if (len > of->atomic_write_len)
			return -E2BIG;
	} else {
		len = min_t(size_t, count, PAGE_SIZE);
	}

	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, user_buf, len)) {
		len = -EFAULT;
		goto out_free;
	}
	buf[len] = '\0';	/* guarantee string termination */

	/*
	 * @of->mutex nests outside active ref and is used to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len > 0)
		*ppos += len;

out_free:
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}
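
/*
 * Illustrative userland sketch of the read-modify-write hint above.  The
 * path and field names are hypothetical; any kernfs-backed attribute that
 * packs several fields into one file follows the same pattern, because
 * partial writes are not supported.
 *
 *	char buf[4096];
 *	ssize_t n;
 *	int fd = open("/sys/kernel/foo/attr", O_RDWR);	// hypothetical node
 *
 *	n = read(fd, buf, sizeof(buf) - 1);		// fetch current content
 *	buf[n > 0 ? n : 0] = '\0';
 *	// ... edit only the field being changed inside buf ...
 *	lseek(fd, 0, SEEK_SET);
 *	write(fd, buf, strlen(buf));			// one write() of the whole buffer
 *	close(fd);
 */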

static void kernfs_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);

	if (!of->vm_ops)
		return;

	if (!kernfs_get_active(of->kn))
		return;

	if (of->vm_ops->open)
		of->vm_ops->open(vma);

	kernfs_put_active(of->kn);
}

static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vma, vmf);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = 0;
	if (of->vm_ops->page_mkwrite)
		ret = of->vm_ops->page_mkwrite(vma, vmf);
	else
		file_update_time(file);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return -EINVAL;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = -EINVAL;
	if (of->vm_ops->access)
		ret = of->vm_ops->access(vma, addr, buf, len, write);

	kernfs_put_active(of->kn);
	return ret;
}

#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
				 struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = 0;
	if (of->vm_ops->set_policy)
		ret = of->vm_ops->set_policy(vma, new);

	kernfs_put_active(of->kn);
	return ret;
}

static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
					       unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	struct mempolicy *pol;

	if (!of->vm_ops)
		return vma->vm_policy;

	if (!kernfs_get_active(of->kn))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (of->vm_ops->get_policy)
		pol = of->vm_ops->get_policy(vma, addr);

	kernfs_put_active(of->kn);
	return pol;
}

#endif

static const struct vm_operations_struct kernfs_vm_ops = {
	.open		= kernfs_vma_open,
	.fault		= kernfs_vma_fault,
	.page_mkwrite	= kernfs_vma_page_mkwrite,
	.access		= kernfs_vma_access,
#ifdef CONFIG_NUMA
	.set_policy	= kernfs_vma_set_policy,
	.get_policy	= kernfs_vma_get_policy,
#endif
};

static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	/*
	 * mmap path and of->mutex are prone to triggering spurious lockdep
	 * warnings and we don't want to add spurious locking dependency
	 * between the two.  Check whether mmap is actually implemented
	 * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
	 * comment in kernfs_fop_open() for more details.
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	if (rc)
		goto out_put;

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close.
	 * So error if someone is trying to use close.
	 */
	rc = -EINVAL;
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	of->mmapped = true;
	of->vm_ops = vma->vm_ops;
	vma->vm_ops = &kernfs_vm_ops;
out_put:
	kernfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);

	return rc;
}

/**
 * kernfs_get_open_node - get or create kernfs_open_node
 * @kn: target kernfs_node
 * @of: kernfs_open_file for this instance of open
 *
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * create one.  @of is chained to the files list.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	struct kernfs_open_node *on, *new_on = NULL;

 retry:
	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irq(&kernfs_open_node_lock);

	if (!kn->attr.open && new_on) {
		kn->attr.open = new_on;
		new_on = NULL;
	}

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->refcnt);
		list_add_tail(&of->list, &on->files);
	}

	spin_unlock_irq(&kernfs_open_node_lock);
	mutex_unlock(&kernfs_open_file_mutex);

	if (on) {
		kfree(new_on);
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
	if (!new_on)
		return -ENOMEM;

	atomic_set(&new_on->refcnt, 0);
	atomic_set(&new_on->event, 1);
	init_waitqueue_head(&new_on->poll);
	INIT_LIST_HEAD(&new_on->files);
	goto retry;
}

/**
 * kernfs_put_open_node - put kernfs_open_node
 * @kn: target kernfs_node
 * @of: associated kernfs_open_file
 *
 * Put @kn->attr.open and unlink @of from the files list.  If
 * reference count reaches zero, disassociate and free it.
 *
 * LOCKING:
 * None.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
				 struct kernfs_open_file *of)
{
	struct kernfs_open_node *on = kn->attr.open;
	unsigned long flags;

	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (of)
		list_del(&of->list);

	if (atomic_dec_and_test(&on->refcnt))
		kn->attr.open = NULL;
	else
		on = NULL;

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
	mutex_unlock(&kernfs_open_file_mutex);

	kfree(on);
}

static int kernfs_fop_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
	struct kernfs_root *root = kernfs_root(kn);
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	bool has_read, has_write, has_mmap;
	int error = -EACCES;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;

	/* see the flag definition for details */
	if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
		if ((file->f_mode & FMODE_WRITE) &&
		    (!(inode->i_mode & S_IWUGO) || !has_write))
			goto err_out;

		if ((file->f_mode & FMODE_READ) &&
		    (!(inode->i_mode & S_IRUGO) || !has_read))
			goto err_out;
	}

	/* allocate a kernfs_open_file for the file */
	error = -ENOMEM;
	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
	if (!of)
		goto err_out;

	/*
	 * The following is done to give a different lockdep key to
	 * @of->mutex for files which implement mmap.  This is a rather
	 * crude way to avoid false positive lockdep warning around
	 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
	 * which mm->mmap_sem nests, while holding @of->mutex.  As each
	 * open file has a separate mutex, it's okay as long as those don't
	 * happen on the same file.  At this point, we can't easily give
	 * each file a separate locking class.  Let's differentiate on
	 * whether the file has mmap or not for now.
	 *
	 * Both paths of the branch look the same.  They're supposed to
	 * look that way and give @of->mutex different static lockdep keys.
	 */
	if (has_mmap)
		mutex_init(&of->mutex);
	else
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;

	/*
	 * The write path needs atomic_write_len outside the active reference.
	 * Cache it in open_file.  See kernfs_fop_write() for details.
	 */
	of->atomic_write_len = ops->atomic_write_len;

	error = -EINVAL;
	/*
	 * ->seq_show is incompatible with ->prealloc,
	 * as seq_read does its own allocation.
	 * ->read must be used instead.
	 */
	if (ops->prealloc && ops->seq_show)
		goto err_free;
	if (ops->prealloc) {
		int len = of->atomic_write_len ?: PAGE_SIZE;
		of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
		error = -ENOMEM;
		if (!of->prealloc_buf)
			goto err_free;
		mutex_init(&of->prealloc_mutex);
	}

	/*
	 * Always instantiate seq_file even if read access doesn't use
	 * seq_file or is not requested.  This unifies private data access
	 * and readable regular files are the vast majority anyway.
	 */
	if (ops->seq_show)
		error = seq_open(file, &kernfs_seq_ops);
	else
		error = seq_open(file, NULL);
	if (error)
		goto err_free;

	of->seq_file = file->private_data;
	of->seq_file->private = of;

	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;

	/* make sure we have open node struct */
	error = kernfs_get_open_node(kn, of);
	if (error)
		goto err_seq_release;

	if (ops->open) {
		/* nobody has access to @of yet, skip @of->mutex */
		error = ops->open(of);
		if (error)
			goto err_put_node;
	}

	/* open succeeded, put active references */
	kernfs_put_active(kn);
	return 0;

err_put_node:
	kernfs_put_open_node(kn, of);
err_seq_release:
	seq_release(inode, file);
err_free:
	kfree(of->prealloc_buf);
	kfree(of);
err_out:
	kernfs_put_active(kn);
	return error;
}

/* used from release/drain to ensure that ->release() is called exactly once */
static void kernfs_release_file(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	if (!(kn->flags & KERNFS_HAS_RELEASE))
		return;

	mutex_lock(&of->mutex);
	if (!of->released) {
		/*
		 * A file is never detached without being released and we
		 * need to be able to release files which are deactivated
		 * and being drained.  Don't use kernfs_ops().
		 */
		kn->attr.ops->release(of);
		of->released = true;
	}
	mutex_unlock(&of->mutex);
}

static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_file *of = kernfs_of(filp);

	kernfs_release_file(kn, of);
	kernfs_put_open_node(kn, of);
	seq_release(inode, filp);
	kfree(of->prealloc_buf);
	kfree(of);

	return 0;
}

void kernfs_drain_open_files(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;

	if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
		return;

	spin_lock_irq(&kernfs_open_node_lock);
	on = kn->attr.open;
	if (on)
		atomic_inc(&on->refcnt);
	spin_unlock_irq(&kernfs_open_node_lock);
	if (!on)
		return;

	mutex_lock(&kernfs_open_file_mutex);

	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);

		if (kn->flags & KERNFS_HAS_MMAP)
			unmap_mapping_range(inode->i_mapping, 0, 0, 1);

		kernfs_release_file(kn, of);
	}

	mutex_unlock(&kernfs_open_file_mutex);

	kernfs_put_open_node(kn, NULL);
}

/*
 * Kernfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (Neither 'poll' nor 'select' return
 * an appropriate error code).  When in doubt, set a suitable timeout value.
 */
static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
	struct kernfs_open_file *of = kernfs_of(filp);
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_node *on = kn->attr.open;

	if (!kernfs_get_active(kn))
		goto trigger;

	poll_wait(filp, &on->poll, wait);

	kernfs_put_active(kn);

	if (of->event != atomic_read(&on->event))
		goto trigger;

	return DEFAULT_POLLMASK;

 trigger:
	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}
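
/*
 * Illustrative userland sketch of the pattern described in the comment
 * above kernfs_fop_poll(): read once to arm the event counter, poll() for
 * POLLPRI, then seek back to 0 and read again.  The path is hypothetical.
 *
 *	char buf[4096];
 *	struct pollfd pfd;
 *	int fd = open("/sys/kernel/foo/attr", O_RDONLY);	// hypothetical node
 *
 *	read(fd, buf, sizeof(buf));			// arm the event counter
 *	pfd.fd = fd;
 *	pfd.events = POLLPRI;
 *	while (poll(&pfd, 1, -1) > 0 &&
 *	       (pfd.revents & (POLLPRI | POLLERR))) {
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));		// fetch the new content
 *	}
 */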

static void kernfs_notify_workfn(struct work_struct *work)
{
	struct kernfs_node *kn;
	struct kernfs_open_node *on;
	struct kernfs_super_info *info;
repeat:
	/* pop one off the notify_list */
	spin_lock_irq(&kernfs_notify_lock);
	kn = kernfs_notify_list;
	if (kn == KERNFS_NOTIFY_EOL) {
		spin_unlock_irq(&kernfs_notify_lock);
		return;
	}
	kernfs_notify_list = kn->attr.notify_next;
	kn->attr.notify_next = NULL;
	spin_unlock_irq(&kernfs_notify_lock);

	/* kick poll */
	spin_lock_irq(&kernfs_open_node_lock);

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->event);
		wake_up_interruptible(&on->poll);
	}

	spin_unlock_irq(&kernfs_open_node_lock);

	/* kick fsnotify */
	mutex_lock(&kernfs_mutex);

	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
		struct kernfs_node *parent;
		struct inode *inode;

		/*
		 * We want fsnotify_modify() on @kn but as the
		 * modifications aren't originating from userland we don't
		 * have the matching @file available.  Look up the inodes
		 * and generate the events manually.
		 */
		inode = ilookup(info->sb, kn->ino);
		if (!inode)
			continue;

		parent = kernfs_get_parent(kn);
		if (parent) {
			struct inode *p_inode;

			p_inode = ilookup(info->sb, parent->ino);
			if (p_inode) {
				fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
					 inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
				iput(p_inode);
			}

			kernfs_put(parent);
		}

		fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
			 kn->name, 0);
		iput(inode);
	}

	mutex_unlock(&kernfs_mutex);
	kernfs_put(kn);
	goto repeat;
}

/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.  May be called from any
 * context.
 */
void kernfs_notify(struct kernfs_node *kn)
{
	static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
	unsigned long flags;

	if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
		return;

	spin_lock_irqsave(&kernfs_notify_lock, flags);
	if (!kn->attr.notify_next) {
		kernfs_get(kn);
		kn->attr.notify_next = kernfs_notify_list;
		kernfs_notify_list = kn;
		schedule_work(&kernfs_notify_work);
	}
	spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);
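
/*
 * Illustrative sketch of a typical producer: a driver that changes the
 * state backing one of its attributes and then wakes poll()ers.  sysfs
 * users normally go through the sysfs_notify()/sysfs_notify_dirent()
 * wrappers, which end up calling kernfs_notify() above.  The device
 * structure, lock, and attribute name below are hypothetical.
 *
 *	spin_lock(&foo->lock);
 *	foo->state = new_state;
 *	spin_unlock(&foo->lock);
 *	sysfs_notify(&foo->dev->kobj, NULL, "state");	// wakes waiters in kernfs_fop_poll()
 */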

const struct file_operations kernfs_file_fops = {
	.read		= kernfs_fop_read,
	.write		= kernfs_fop_write,
	.llseek		= generic_file_llseek,
	.mmap		= kernfs_fop_mmap,
	.open		= kernfs_fop_open,
	.release	= kernfs_fop_release,
	.poll		= kernfs_fop_poll,
	.fsync		= noop_fsync,
};

/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
					 const char *name,
					 umode_t mode, loff_t size,
					 const struct kernfs_ops *ops,
					 void *priv, const void *ns,
					 struct lock_class_key *key)
{
	struct kernfs_node *kn;
	unsigned flags;
	int rc;

	flags = KERNFS_FILE;

	kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (key) {
		lockdep_init_map(&kn->dep_map, "s_active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	}
#endif

	/*
	 * kn->attr.ops is accessible only while holding active ref.  We
	 * need to know whether some ops are implemented outside active
	 * ref.  Cache their existence in flags.
	 */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;
	if (ops->release)
		kn->flags |= KERNFS_HAS_RELEASE;

	rc = kernfs_add_one(kn);
	if (rc) {
		kernfs_put(kn);
		return ERR_PTR(rc);
	}
	return kn;
}
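
/*
 * Illustrative sketch of a caller of this interface.  Filesystems built on
 * kernfs (sysfs, cgroup) define a kernfs_ops and create the node through
 * the kernfs_create_file() helper declared in include/linux/kernfs.h,
 * which supplies @ns and @key and ends up in __kernfs_create_file().  The
 * ops, names, and backing variable below are hypothetical.
 *
 *	static int foo_seq_show(struct seq_file *sf, void *v)
 *	{
 *		struct kernfs_open_file *of = sf->private;
 *
 *		seq_printf(sf, "%d\n", *(int *)of->kn->priv);	// priv set at creation
 *		return 0;
 *	}
 *
 *	static const struct kernfs_ops foo_ops = {
 *		.seq_show	= foo_seq_show,
 *	};
 *
 *	kn = kernfs_create_file(parent, "foo_value", 0444, 0, &foo_ops,
 *				&foo_value);
 */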