/*
 * psb GEM interface
 *
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Alan Cox
 *
 * TODO:
 *	- we don't actually put GEM objects into the GART yet
 *	- we need to work out if the MMU is relevant as well (eg for
 *	  accelerated operations on a GEM object)
 *	- cache coherency
 *
 * ie this is just an initial framework to get us going.
 */

#include <drm/drmP.h>
#include <drm/drm.h>
#include "psb_drm.h"
#include "psb_drv.h"

int psb_gem_init_object(struct drm_gem_object *obj)
{
	return -EINVAL;
}

void psb_gem_free_object(struct drm_gem_object *obj)
{
	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
	psb_gtt_free_range(obj->dev, gtt);
	if (obj->map_list.map) {
		/* Do things GEM should do for us */
		struct drm_gem_mm *mm = obj->dev->mm_private;
		struct drm_map_list *list = &obj->map_list;
		drm_ht_remove_item(&mm->offset_hash, &list->hash);
		drm_mm_put_block(list->file_offset_node);
		kfree(list->map);
		list->map = NULL;
	}
	drm_gem_object_release(obj);
}

int psb_gem_get_aperture(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	return -EINVAL;
}

/**
 * psb_gem_create_mmap_offset - invent an mmap offset
 * @obj: our object
 *
 * This is basically doing by hand a pile of ugly crap which should
 * be done automatically by the GEM library code but isn't
 */
static int psb_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret;

	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (list->map == NULL)
		return -ENOMEM;
	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto free_it;
	}
	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto free_it;
	}
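	/*
	 * The start of the reserved drm_mm node, counted in pages, becomes
	 * the hash key; userspace later mmaps at key << PAGE_SHIFT (see
	 * psb_gem_dumb_map_gtt below).
	 */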
	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto free_mm;
	}
	return 0;

free_mm:
	drm_mm_put_block(list->file_offset_node);
free_it:
	kfree(list->map);
	list->map = NULL;
	return ret;
}

/**
 * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset for the object
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
			 uint32_t handle, uint64_t *offset)
{
	int ret = 0;
	struct drm_gem_object *obj;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	/* What validation is needed here ? */

	/* Make it mmapable */
	if (!obj->map_list.map) {
		ret = psb_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}
	/* GEM should really work out the hash offsets for us */
	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
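
/*
 * Illustrative sketch (not part of this driver): how a user-space client
 * might consume the offset returned above. The handle and size values are
 * assumed to come from an earlier DRM_IOCTL_MODE_CREATE_DUMB call, fd is
 * an open DRM device node, and drmIoctl() is libdrm's ioctl wrapper.
 *
 *	struct drm_mode_map_dumb mreq = { .handle = handle };
 *	void *fb = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq) == 0)
 *		fb = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, mreq.offset);
 */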

/**
 * psb_gem_create - create a mappable object
 * @file: the DRM file of the client
 * @dev: our device
 * @size: the size requested
 * @handlep: returned handle (opaque number)
 *
 * Create a GEM object, fill in the boilerplate and attach a handle to
 * it so that userspace can speak about it. This does the core work
 * for the various methods that do/will create GEM objects for things
 */
static int psb_gem_create(struct drm_file *file,
	struct drm_device *dev, uint64_t size, uint32_t *handlep)
{
	struct gtt_range *r;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	/* Allocate our object - for now a direct gtt range which is not
	   stolen memory backed */
	r = psb_gtt_alloc_range(dev, size, "gem", 0);
	if (r == NULL)
		return -ENOSPC;
	/* Initialize the extra goodies GEM needs to do all the hard work */
	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
		psb_gtt_free_range(dev, r);
		/* GEM doesn't give an error code and we don't have an
		   EGEMSUCKS so make something up for now - FIXME */
		return -ENOMEM;
	}
	/* Give the object a handle so we can carry it more easily */
	ret = drm_gem_handle_create(file, &r->gem, &handle);
	if (ret) {
		drm_gem_object_release(&r->gem);
		psb_gtt_free_range(dev, r);
		return ret;
	}
	/* We have the initial and handle reference but need only one now */
	drm_gem_object_unreference(&r->gem);
	*handlep = handle;
	return 0;
}

/**
 * psb_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
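	/*
	 * Round bpp up to whole bytes per pixel, then align the row pitch
	 * to 64 bytes (assumed to be what the display engine wants for a
	 * scanout stride). For example, 1920x1080 at 32bpp gives
	 * pitch = ALIGN(1920 * 4, 64) = 7680 and size = 7680 * 1080 bytes.
	 */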
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return psb_gem_create(file, dev, args->size, &args->handle);
}
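
/*
 * Illustrative sketch (not part of this driver): the user-space side of the
 * dumb-create path above. The client fills in width/height/bpp and reads
 * handle, pitch and size back out; fd is assumed to be an open DRM device
 * node and drmIoctl() comes from libdrm.
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) == 0)
 *		printf("handle %u pitch %u size %llu\n",
 *		       creq.handle, creq.pitch, creq.size);
 */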

/**
 * psb_gem_dumb_destroy - destroy a dumb buffer
 * @file: client file
 * @dev: our DRM device
 * @handle: the object handle
 *
 * Destroy a handle that was created via psb_gem_dumb_create, at least
 * we hope it was created that way. i915 seems to assume the caller
 * does the checking but that might be worth review! FIXME
 */
int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
			uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}

/**
 * psb_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * This code eventually needs to handle faulting objects in and out
 * of the GART and repacking it when we run out of space. We can put
 * that off for now given our simple uses.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 *
 * To avoid aliasing and cache funnies we want to map the object
 * through the GART. For the moment this is slightly hackish. It would
 * be nicer if GEM provided mmap opened/closed hooks for us giving
 * the object so that we could track things nicely. That needs changes
 * to the core GEM code so must be tackled post staging
 *
 * FIXME
 */
int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj;
	struct gtt_range *r;
	int ret;
	unsigned long pfn;
	pgoff_t page_offset;
	struct drm_device *dev;

	obj = vma->vm_private_data;	/* GEM object */
	dev = obj->dev;

	r = container_of(obj, struct gtt_range, gem);	/* Get the gtt range */

	/* Make sure we don't parallel update on a fault, nor move or remove
	   something from beneath our feet */
	mutex_lock(&dev->struct_mutex);

	/* For now the mmap pins the object and it stays pinned. As things
	   stand that will do us no harm */
	if (r->mmapping == 0) {
		ret = psb_gtt_pin(r);
		if (ret < 0) {
			DRM_ERROR("gma500: pin failed: %d\n", ret);
			goto fail;
		}
		r->mmapping = 1;
	}

	/* FIXME: Locking. We may also need to repack the GART sometimes */

	/* Page relative to the VMA start */
	page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
				>> PAGE_SHIFT;

	/* Bus address of the page is gart + object offset + page offset */
	/* Assumes gtt allocations are page aligned */
	pfn = (r->resource.start >> PAGE_SHIFT) + page_offset;

	pr_debug("Object GTT base at %p\n", (void *)(r->resource.start));
	pr_debug("Inserting %p pfn %lx, pa %lx\n", vmf->virtual_address,
						pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

fail:
	mutex_unlock(&dev->struct_mutex);
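	/*
	 * Translate kernel error codes into fault results: success or an
	 * interrupted wait simply retries the fault, allocation failure is
	 * reported as OOM, and anything else becomes SIGBUS.
	 */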
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}