        int left, right, top, bottom;
        int i;
        int inc = 1;
+
        if (!num_clips) {
                num_clips = 1;
                clips = &norect;
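Annotation for the hunk above: this is the dirty-rectangle path of the virtio-gpu framebuffer code. When the caller supplies no clip rectangles, the code falls back to a single rectangle, norect, presumably filled in with the full framebuffer bounds just below the context shown here, so the flush still covers the whole surface. A minimal standalone sketch of that fallback follows; struct clip_rect, default_to_full_rect() and the fb_w/fb_h parameters are invented for the sketch and are not the driver's drm_clip_rect API.

#include <stdio.h>

/* Invented stand-in for drm_clip_rect. */
struct clip_rect {
        unsigned short x1, y1, x2, y2;
};

/* If the caller supplied no clips, substitute one full-screen rectangle
 * so the rest of the update loop needs no special "no clips" case. */
static void default_to_full_rect(const struct clip_rect **clips,
                                 unsigned int *num_clips,
                                 struct clip_rect *norect,
                                 unsigned int fb_w, unsigned int fb_h)
{
        if (!*num_clips) {
                norect->x1 = norect->y1 = 0;
                norect->x2 = (unsigned short)fb_w;
                norect->y2 = (unsigned short)fb_h;
                *clips = norect;
                *num_clips = 1;
        }
}

int main(void)
{
        const struct clip_rect *clips = NULL;
        struct clip_rect norect;
        unsigned int num_clips = 0;

        default_to_full_rect(&clips, &num_clips, &norect, 1024, 768);
        printf("%u clip(s), covering %ux%u\n", num_clips,
               (unsigned int)clips->x2, (unsigned int)clips->y2);
        return 0;
}

Substituting a one-element array keeps the update loop itself free of a special case for the "no clips" call.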
                                const struct fb_fillrect *rect)
{
        struct virtio_gpu_fbdev *vfbdev = info->par;
+
        drm_fb_helper_sys_fillrect(info, rect);
        virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
                                rect->width, rect->height);

                                const struct fb_copyarea *area)
{
        struct virtio_gpu_fbdev *vfbdev = info->par;
+
        drm_fb_helper_sys_copyarea(info, area);
        virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
                                area->width, area->height);

                                const struct fb_image *image)
{
        struct virtio_gpu_fbdev *vfbdev = info->par;
+
        drm_fb_helper_sys_imageblit(info, image);
        virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
                                image->width, image->height);
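The three hunks above are the fbdev drawing callbacks (fillrect, copyarea, imageblit). Each one delegates the actual rendering to the matching drm_fb_helper_sys_* software helper and then calls virtio_gpu_dirty_update() with the touched rectangle, so only the damaged region has to be transferred to the host later. A tiny sketch of that shared two-step shape follows; struct rect, sw_fillrect() and mark_dirty() are hypothetical stand-ins, not driver functions.

#include <stdio.h>

struct rect { unsigned int x, y, width, height; };

/* Hypothetical software renderer (stands in for drm_fb_helper_sys_fillrect). */
static void sw_fillrect(const struct rect *r)
{
        printf("render %ux%u at %u,%u into the shadow buffer\n",
               r->width, r->height, r->x, r->y);
}

/* Hypothetical dirty tracker (stands in for virtio_gpu_dirty_update). */
static void mark_dirty(const struct rect *r)
{
        printf("queue host update for %ux%u at %u,%u\n",
               r->width, r->height, r->x, r->y);
}

/* Common shape of all three callbacks: draw in software, then record
 * exactly which region changed. */
static void fbdev_fillrect(const struct rect *r)
{
        sw_fillrect(r);
        mark_dirty(r);
}

int main(void)
{
        struct rect r = { 10, 20, 64, 32 };

        fbdev_fillrect(&r);
        return 0;
}

Passing only the damaged rectangle keeps the host-side transfer limited to the changed region instead of re-uploading the whole shadow framebuffer.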
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;
+
        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);
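The hunk above walks a TTM validation list and uses container_of() to get from each ttm_buffer_object back to the virtio_gpu_object that embeds it (member tbo). For readers unfamiliar with the idiom, here is a userspace sketch; the macro restates the kernel's container_of() and the fake_* struct names are invented.

#include <stddef.h>
#include <stdio.h>

/* Userspace restatement of the kernel's container_of(): subtract the
 * member's offset from the member's address to recover the outer object. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_ttm_bo {
        int placement;
};

/* Mirrors the embedding in the hunk: the driver object contains its
 * buffer object by value, not by pointer. */
struct fake_gpu_object {
        int resource_id;
        struct fake_ttm_bo tbo;
};

int main(void)
{
        struct fake_gpu_object obj = { .resource_id = 42 };
        struct fake_ttm_bo *bo = &obj.tbo;
        struct fake_gpu_object *back =
                container_of(bo, struct fake_gpu_object, tbo);

        printf("recovered resource id: %d\n", back->resource_id);
        return 0;
}

This works because tbo is embedded by value, so its address differs from the containing object's address by a constant, compile-time offset.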
        int ret;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *ptr;
+
        if (vgdev->num_capsets == 0)
                return -ENOSYS;
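Note on the hunk above: this appears to be the capability-set query path; returning -ENOSYS when the device advertised zero capsets lets userspace distinguish "the host has no capability sets at all" from a transient failure while fetching one.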
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;
+
        schedule_work(&vgdev->ctrlq.dequeue_work);
}

{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;
+
        schedule_work(&vgdev->cursorq.dequeue_work);
}
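The two hunks above look like the virtqueue notification callbacks for the control and cursor queues. They are called from the virtio interrupt path, so each one only schedules the matching dequeue work item and returns; the completed buffers are drained later, in process context, by the workqueue. A rough userspace analogy of that split follows; the driver uses schedule_work() on a real workqueue, not the flag below.

#include <stdbool.h>
#include <stdio.h>

static bool ctrl_work_pending;

/* "Top half": keep the notification handler cheap, just record that
 * there is work to do. */
static void ctrl_queue_notified(void)
{
        ctrl_work_pending = true;
}

/* "Bottom half": the expensive draining happens outside the handler. */
static void ctrl_dequeue_worker(void)
{
        if (!ctrl_work_pending)
                return;
        ctrl_work_pending = false;
        printf("draining completed control-queue buffers\n");
}

int main(void)
{
        ctrl_queue_notified();
        ctrl_dequeue_worker();
        return 0;
}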
        if (!obj->pages) {
                int ret;
+
                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
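The final hunk builds the object's scatter-gather table lazily: only if obj->pages has not been populated yet does it call virtio_gpu_object_get_sg_table(), and any error is handed back to the caller unchanged. A small standalone sketch of that lazy-init-with-error-propagation pattern follows; the fake_* names are invented.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented object: the page table is created on first use, mirroring
 * the "if (!obj->pages)" guard in the hunk above. */
struct fake_obj {
        int *pages;
        size_t npages;
};

static int fake_get_sg_table(struct fake_obj *obj)
{
        obj->pages = calloc(obj->npages, sizeof(*obj->pages));
        return obj->pages ? 0 : -ENOMEM;
}

static int fake_attach(struct fake_obj *obj)
{
        if (!obj->pages) {
                int ret = fake_get_sg_table(obj);

                if (ret)
                        return ret;     /* propagate the error unchanged */
        }
        /* ... continue the attach using obj->pages ... */
        return 0;
}

int main(void)
{
        struct fake_obj obj = { .pages = NULL, .npages = 16 };

        printf("attach: %d\n", fake_attach(&obj));
        free(obj.pages);
        return 0;
}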