/*
 * Source: git.proxmox.com mirror_ubuntu-artful-kernel.git
 * Merge tag 'for-linus-20170825' of git://git.infradead.org/linux-mtd
 * File: drivers/staging/vboxvideo/vbox_fb.c
 */
1 /*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_fb.c
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 * Authors: Dave Airlie <airlied@redhat.com>
 *          Michael Thayer <michael.thayer@oracle.com>
28 */
29 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/string.h>
33 #include <linux/mm.h>
34 #include <linux/tty.h>
35 #include <linux/sysrq.h>
36 #include <linux/delay.h>
37 #include <linux/fb.h>
38 #include <linux/init.h>
39
40 #include <drm/drmP.h>
41 #include <drm/drm_crtc.h>
42 #include <drm/drm_fb_helper.h>
43 #include <drm/drm_crtc_helper.h>
44
45 #include "vbox_drv.h"
46 #include "vboxvideo.h"
47
48 #define VBOX_DIRTY_DELAY (HZ / 30)
/**
 * Tell the host about dirty rectangles to update.
 *
 * Damage from @x/@y/@width/@height is merged with any damage stored from
 * earlier calls that could not flush (BO busy or atomic context).  When the
 * backing BO can be reserved, the merged rectangle is sent to the host;
 * otherwise it is stored in @fbdev for a later call to pick up.
 */
static void vbox_dirty_update(struct vbox_fbdev *fbdev,
			      int x, int y, int width, int height)
{
	struct drm_gem_object *obj;
	struct vbox_bo *bo;
	int ret = -EBUSY;
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;
	struct drm_clip_rect rect;

	obj = fbdev->afb.obj;
	bo = gem_to_vbox_bo(obj);

	/*
	 * try and reserve the BO, if we fail with busy
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
	if (drm_can_sleep())
		ret = vbox_bo_reserve(bo, true);
	if (ret) {
		/* Reserve failed for a reason other than "busy": give up. */
		if (ret != -EBUSY)
			return;

		store_for_later = true;
	}

	/* Inclusive bottom-right corner of the caller's rectangle. */
	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&fbdev->dirty_lock, flags);

	/*
	 * Grow the rectangle to also cover damage stored by a previous call.
	 * The accumulator is reset to the "empty" rectangle (x1/y1 = INT_MAX,
	 * x2/y2 = 0) on flush, so these comparisons are no-ops when nothing
	 * is pending.
	 */
	if (fbdev->y1 < y)
		y = fbdev->y1;
	if (fbdev->y2 > y2)
		y2 = fbdev->y2;
	if (fbdev->x1 < x)
		x = fbdev->x1;
	if (fbdev->x2 > x2)
		x2 = fbdev->x2;

	if (store_for_later) {
		/* BO is busy: stash the merged damage and flush later. */
		fbdev->x1 = x;
		fbdev->x2 = x2;
		fbdev->y1 = y;
		fbdev->y2 = y2;
		spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
		return;
	}

	/* Flushing now: reset the accumulator to the empty rectangle. */
	fbdev->x1 = INT_MAX;
	fbdev->y1 = INT_MAX;
	fbdev->x2 = 0;
	fbdev->y2 = 0;

	spin_unlock_irqrestore(&fbdev->dirty_lock, flags);

	/*
	 * Not sure why the original code subtracted 1 here, but I will keep
	 * it that way to avoid unnecessary differences.
	 *
	 * drm_clip_rect uses exclusive x2/y2, hence the +1 on each below.
	 */
	rect.x1 = x;
	rect.x2 = x2 + 1;
	rect.y1 = y;
	rect.y2 = y2 + 1;
	vbox_framebuffer_dirty_rectangles(&fbdev->afb.base, &rect, 1);

	vbox_bo_unreserve(bo);
}
121
122 #ifdef CONFIG_FB_DEFERRED_IO
/*
 * Deferred-I/O callback: invoked by the fbdev core after VBOX_DIRTY_DELAY
 * with the list of pages userspace wrote to via mmap.  Converts the touched
 * byte span into a scanline range and reports it as a full-width dirty
 * rectangle.
 */
static void vbox_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct vbox_fbdev *fbdev = info->par;
	unsigned long start, end, min, max;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		/* Page index within the mapping -> byte offsets. */
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	/* min < max iff the page list was non-empty. */
	if (min < max) {
		/* y1 inclusive, y2 exclusive scanline bounds of the damage. */
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;
		DRM_INFO("%s: Calling dirty update: 0, %d, %d, %d\n",
			 __func__, y1, info->var.xres, y2 - y1 - 1);
		/*
		 * NOTE(review): the height passed is y2 - y1 - 1, but
		 * vbox_dirty_update() treats height as counting the last row
		 * (it computes bottom = y + height - 1), so this looks one
		 * scanline short -- confirm against host update semantics.
		 */
		vbox_dirty_update(fbdev, 0, y1, info->var.xres, y2 - y1 - 1);
	}
}
147
/* Deferred-I/O descriptor: batch mmap writes for up to VBOX_DIRTY_DELAY. */
static struct fb_deferred_io vbox_defio = {
	.delay = VBOX_DIRTY_DELAY,
	.deferred_io = vbox_deferred_io,
};
152 #endif
153
154 static void vbox_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
155 {
156 struct vbox_fbdev *fbdev = info->par;
157
158 sys_fillrect(info, rect);
159 vbox_dirty_update(fbdev, rect->dx, rect->dy, rect->width, rect->height);
160 }
161
162 static void vbox_copyarea(struct fb_info *info, const struct fb_copyarea *area)
163 {
164 struct vbox_fbdev *fbdev = info->par;
165
166 sys_copyarea(info, area);
167 vbox_dirty_update(fbdev, area->dx, area->dy, area->width, area->height);
168 }
169
170 static void vbox_imageblit(struct fb_info *info, const struct fb_image *image)
171 {
172 struct vbox_fbdev *fbdev = info->par;
173
174 sys_imageblit(info, image);
175 vbox_dirty_update(fbdev, image->dx, image->dy, image->width,
176 image->height);
177 }
178
179 static struct fb_ops vboxfb_ops = {
180 .owner = THIS_MODULE,
181 .fb_check_var = drm_fb_helper_check_var,
182 .fb_set_par = drm_fb_helper_set_par,
183 .fb_fillrect = vbox_fillrect,
184 .fb_copyarea = vbox_copyarea,
185 .fb_imageblit = vbox_imageblit,
186 .fb_pan_display = drm_fb_helper_pan_display,
187 .fb_blank = drm_fb_helper_blank,
188 .fb_setcmap = drm_fb_helper_setcmap,
189 .fb_debug_enter = drm_fb_helper_debug_enter,
190 .fb_debug_leave = drm_fb_helper_debug_leave,
191 };
192
193 static int vboxfb_create_object(struct vbox_fbdev *fbdev,
194 struct DRM_MODE_FB_CMD *mode_cmd,
195 struct drm_gem_object **gobj_p)
196 {
197 struct drm_device *dev = fbdev->helper.dev;
198 u32 size;
199 struct drm_gem_object *gobj;
200 u32 pitch = mode_cmd->pitches[0];
201 int ret;
202
203 size = pitch * mode_cmd->height;
204 ret = vbox_gem_create(dev, size, true, &gobj);
205 if (ret)
206 return ret;
207
208 *gobj_p = gobj;
209
210 return 0;
211 }
212
/*
 * drm_fb_helper .fb_probe callback: allocate a VRAM buffer object for the
 * fbdev console, pin and kmap it, and register an fb_info describing it.
 *
 * NOTE(review): the error paths after vboxfb_create_object() succeeds do
 * not unwind earlier steps (GEM reference, BO pin, kmap, fb_info, cmap) --
 * those resources leak if a later step fails.  Verify against the teardown
 * in vbox_fbdev_fini() before relying on repeated probe attempts.
 */
static int vboxfb_create(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct vbox_fbdev *fbdev =
	    container_of(helper, struct vbox_fbdev, helper);
	struct drm_device *dev = fbdev->helper.dev;
	struct DRM_MODE_FB_CMD mode_cmd;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	struct device *device = &dev->pdev->dev;
	struct drm_gem_object *gobj;
	struct vbox_bo *bo;
	int size, ret;
	u32 pitch;

	/* Build the mode command from the surface the helper asked for. */
	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	/* Pitch in bytes; (bpp + 7) / 8 rounds bits up to whole bytes. */
	pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	mode_cmd.pitches[0] = pitch;

	size = pitch * mode_cmd.height;

	ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj);
	if (ret)
		return ret;

	bo = gem_to_vbox_bo(gobj);

	ret = vbox_bo_reserve(bo, false);
	if (ret)
		return ret;

	/* Pin the console buffer into VRAM so it cannot be evicted. */
	ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
	if (ret) {
		vbox_bo_unreserve(bo);
		return ret;
	}

	/* Map the whole BO so the CPU can draw into it (bo->kmap). */
	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
	vbox_bo_unreserve(bo);
	if (ret) {
		DRM_ERROR("failed to kmap fbcon\n");
		return ret;
	}

	info = framebuffer_alloc(0, device);
	if (!info)
		return -ENOMEM;
	info->par = fbdev;

	fbdev->size = size;

	fb = &fbdev->afb.base;
	fbdev->helper.fb = fb;
	fbdev->helper.fbdev = info;

	strcpy(info->fix.id, "vboxdrmfb");

	/*
	 * The last flag forces a mode set on VT switches even if the kernel
	 * does not think it is needed.
	 */
	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT |
		      FBINFO_MISC_ALWAYS_SETPAR;
	info->fbops = &vboxfb_ops;

	/* fb_alloc_cmap only fails on allocation, hence the fixed -ENOMEM. */
	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret)
		return -ENOMEM;

	/*
	 * This seems to be done for safety checking that the framebuffer
	 * is not registered twice by different drivers.
	 */
	info->apertures = alloc_apertures(1);
	if (!info->apertures)
		return -ENOMEM;
	info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
	info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* Userspace/fbcon draw directly into the kmapped VRAM buffer. */
	info->screen_base = bo->kmap.virtual;
	info->screen_size = size;

#ifdef CONFIG_FB_DEFERRED_IO
	info->fbdefio = &vbox_defio;
	fb_deferred_io_init(info);
#endif

	info->pixmap.flags = FB_PIXMAP_SYSTEM;

	DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);

	return 0;
}
319
/* .gamma_set hook: intentionally a no-op -- gamma is not programmable here. */
static void vbox_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			      u16 blue, int regno)
{
}
324
325 static void vbox_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
326 u16 *blue, int regno)
327 {
328 *red = regno;
329 *green = regno;
330 *blue = regno;
331 }
332
333 static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
334 .gamma_set = vbox_fb_gamma_set,
335 .gamma_get = vbox_fb_gamma_get,
336 .fb_probe = vboxfb_create,
337 };
338
/*
 * Tear down the fbdev console: unregister the fb_info, release the backing
 * buffer object (kunmap, unpin, drop the GEM reference) and clean up the
 * framebuffer.  Order matters: the fb_info must be unregistered before the
 * memory it scans out of is released.
 */
void vbox_fbdev_fini(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;
	struct vbox_fbdev *fbdev = vbox->fbdev;
	struct vbox_framebuffer *afb = &fbdev->afb;

	drm_fb_helper_unregister_fbi(&fbdev->helper);

	if (afb->obj) {
		struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);

		/* Best effort: skip the unmap/unpin if the BO is busy. */
		if (!vbox_bo_reserve(bo, false)) {
			if (bo->kmap.virtual)
				ttm_bo_kunmap(&bo->kmap);
			/*
			 * QXL does this, but is it really needed before
			 * freeing?
			 */
			if (bo->pin_count)
				vbox_bo_unpin(bo);
			vbox_bo_unreserve(bo);
		}
		drm_gem_object_unreference_unlocked(afb->obj);
		afb->obj = NULL;
	}
	drm_fb_helper_fini(&fbdev->helper);

	drm_framebuffer_unregister_private(&afb->base);
	drm_framebuffer_cleanup(&afb->base);
}
369
/*
 * Set up the fbdev emulation: allocate the vbox_fbdev state (devm-managed,
 * so it is freed automatically with the device), register the fb helper and
 * trigger the initial configuration, which ends up calling vboxfb_create().
 *
 * Returns 0 on success or a negative errno; on failure the fb helper is
 * unwound but the devm allocation is left for devres to release.
 */
int vbox_fbdev_init(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;
	struct vbox_fbdev *fbdev;
	int ret;

	fbdev = devm_kzalloc(dev->dev, sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		return -ENOMEM;

	vbox->fbdev = fbdev;
	spin_lock_init(&fbdev->dirty_lock);

	drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
	ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	if (ret)
		goto err_fini;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	/* 32 = preferred console depth in bits per pixel. */
	ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
	if (ret)
		goto err_fini;

	return 0;

err_fini:
	drm_fb_helper_fini(&fbdev->helper);
	return ret;
}
405
406 void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr)
407 {
408 struct fb_info *fbdev = vbox->fbdev->helper.fbdev;
409
410 fbdev->fix.smem_start = fbdev->apertures->ranges[0].base + gpu_addr;
411 fbdev->fix.smem_len = vbox->available_vram_size - gpu_addr;
412 }