/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

/*
 * Return the page backing the framebuffer at byte offset offs,
 * handling both vmalloc-ed buffers and physically contiguous
 * memory at fix.smem_start.
 */
static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
        void *screen_base = (void __force *) info->screen_base;
        struct page *page;

        if (is_vmalloc_addr(screen_base + offs))
                page = vmalloc_to_page(screen_base + offs);
        else
                page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

        return page;
}

/*
 * Fault handler: find and return the framebuffer page backing the
 * faulting address, whether vmalloc-ed or physically contiguous.
 */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        unsigned long offset;
        struct page *page;
        struct fb_info *info = vma->vm_private_data;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= info->fix.smem_len)
                return VM_FAULT_SIGBUS;

        page = fb_deferred_io_page(info, offset);
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);

        if (vma->vm_file)
                page->mapping = vma->vm_file->f_mapping;
        else
                printk(KERN_ERR "no mapping available\n");

        BUG_ON(!page->mapping);
        page->index = vmf->pgoff;

        vmf->page = page;
        return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct fb_info *info = file->private_data;
        struct inode *inode = file->f_path.dentry->d_inode;
        int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;

        /* Skip if deferred io is compiled-in but disabled on this fbdev */
        if (!info->fbdefio)
                return 0;

        mutex_lock(&inode->i_mutex);
        /* Kill off the delayed work */
        cancel_delayed_work_sync(&info->deferred_work);

        /*
         * Run it immediately.  Note that schedule_delayed_work() returns
         * whether the work was newly queued, not an errno, so its result
         * must not be propagated as the fsync return value.
         */
        schedule_delayed_work(&info->deferred_work, 0);
        mutex_unlock(&inode->i_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

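/*
 * For reference, a sketch of how this fsync handler is typically wired
 * up: the fbdev core (fbmem.c) points the character device's
 * file_operations at it when deferred I/O support is compiled in.
 * This is illustrative only; the actual fb_fops definition lives in
 * fbmem.c, not here:
 *
 *      static const struct file_operations fb_fops = {
 *              ...
 *      #ifdef CONFIG_FB_DEFERRED_IO
 *              .fsync = fb_deferred_io_fsync,
 *      #endif
 *              ...
 *      };
 */
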
/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
                                  struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct fb_info *info = vma->vm_private_data;
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *cur;

        /*
         * This is the callback we get when userspace first tries to write
         * to a page.  We schedule delayed work that will eventually
         * mkclean the touched pages and run the deferred framebuffer I/O.
         * If userspace then touches a page again, the same scheme repeats.
         */

        /* protect against the workqueue changing the page list */
        mutex_lock(&fbdefio->lock);

        /* first write in this cycle, notify the driver */
        if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
                fbdefio->first_io(info);

        /*
         * We want the page to remain locked from ->page_mkwrite until
         * the PTE is marked dirty to avoid page_mkclean() being called
         * before the PTE is updated, which would leave the page ignored
         * by defio.
         * Do this by locking the page here and informing the caller
         * about it with VM_FAULT_LOCKED.
         */
        lock_page(page);

        /*
         * We walk the pagelist before adding so that the list stays
         * sorted by page index.
         */
        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
                /*
                 * This check catches the case where a second process
                 * starts writing to the same page through a new PTE.
                 * That access can trigger mkwrite even though the
                 * original process's PTE is already marked writable.
                 */
                if (unlikely(cur == page))
                        goto page_already_added;
                else if (cur->index > page->index)
                        break;
        }

        list_add_tail(&page->lru, &cur->lru);

page_already_added:
        mutex_unlock(&fbdefio->lock);

        /* come back after delay to process the deferred IO */
        schedule_delayed_work(&info->deferred_work, fbdefio->delay);
        return VM_FAULT_LOCKED;
}

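/*
 * A minimal sketch of the driver-side ->deferred_io callback that
 * consumes the sorted pagelist built in fb_deferred_io_mkwrite()
 * above; each page's ->index is its page offset into the framebuffer.
 * The example_* names are hypothetical placeholders, not part of this
 * file or of the fbdev API:
 *
 *      static void example_deferred_io(struct fb_info *info,
 *                                      struct list_head *pagelist)
 *      {
 *              struct page *page;
 *
 *              list_for_each_entry(page, pagelist, lru)
 *                      example_write_page(info, page->index << PAGE_SHIFT);
 *      }
 */
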
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
        .fault          = fb_deferred_io_fault,
        .page_mkwrite   = fb_deferred_io_mkwrite,
};

/*
 * The default ->set_page_dirty implementations assume buffer heads or
 * radix-tree dirty tagging, neither of which applies to these
 * framebuffer pages; defio tracks writes through its own pagelist.
 * So just set the page's dirty flag and do nothing else.
 */
static int fb_deferred_io_set_page_dirty(struct page *page)
{
        if (!PageDirty(page))
                SetPageDirty(page);
        return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
        .set_page_dirty = fb_deferred_io_set_page_dirty,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
        vma->vm_ops = &fb_deferred_io_vm_ops;
        vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
        if (!(info->flags & FBINFO_VIRTFB))
                vma->vm_flags |= VM_IO;
        vma->vm_private_data = info;
        return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
        struct fb_info *info = container_of(work, struct fb_info,
                                            deferred_work.work);
        struct list_head *node, *next;
        struct page *cur;
        struct fb_deferred_io *fbdefio = info->fbdefio;

        /* here we mkclean the pages, then do all deferred IO */
        mutex_lock(&fbdefio->lock);
        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
                lock_page(cur);
                page_mkclean(cur);
                unlock_page(cur);
        }

        /* driver's callback with pagelist */
        fbdefio->deferred_io(info, &fbdefio->pagelist);

        /* clear the list */
        list_for_each_safe(node, next, &fbdefio->pagelist)
                list_del(node);
        mutex_unlock(&fbdefio->lock);
}

void fb_deferred_io_init(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;

        BUG_ON(!fbdefio);
        mutex_init(&fbdefio->lock);
        info->fbops->fb_mmap = fb_deferred_io_mmap;
        INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
        INIT_LIST_HEAD(&fbdefio->pagelist);
        if (fbdefio->delay == 0) /* set a default of 1 s */
                fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

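/*
 * Typical driver-side setup, as a sketch continuing the hypothetical
 * example_* names from above (they are illustrative, not part of this
 * file): the driver fills in a struct fb_deferred_io, hangs it off
 * info->fbdefio, and calls fb_deferred_io_init() before registering
 * the framebuffer.
 *
 *      static struct fb_deferred_io example_defio = {
 *              .delay          = HZ,
 *              .deferred_io    = example_deferred_io,
 *      };
 *
 *      info->fbdefio = &example_defio;
 *      fb_deferred_io_init(info);
 *      register_framebuffer(info);
 */
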
void fb_deferred_io_open(struct fb_info *info,
                         struct inode *inode,
                         struct file *file)
{
        file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

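/*
 * For reference: the fbdev core is expected to call this from its open
 * path so that each struct file mapping the device picks up the defio
 * address_space_operations.  Roughly, in fbmem.c's fb_open() (shown
 * here only as an illustrative sketch):
 *
 *      #ifdef CONFIG_FB_DEFERRED_IO
 *              if (info->fbdefio)
 *                      fb_deferred_io_open(info, inode, file);
 *      #endif
 */
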
void fb_deferred_io_cleanup(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *page;
        int i;

        BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);

        /* clear out the mapping that we set up */
        for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
                page = fb_deferred_io_page(info, i);
                page->mapping = NULL;
        }

        info->fbops->fb_mmap = NULL;
        mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

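/*
 * Matching teardown sketch for the setup example above: a driver's
 * remove path should stop deferred I/O after the framebuffer is
 * unregistered (ordering shown is illustrative).
 *
 *      unregister_framebuffer(info);
 *      fb_deferred_io_cleanup(info);
 *      framebuffer_release(info);
 */
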
MODULE_LICENSE("GPL");