/*
 * drivers/char/drm/i915_mem.c — from Linux 2.6.12-rc2
 * (gitweb capture of mirror_ubuntu-artful-kernel.git)
 */
1 /* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
2 */
3 /**************************************************************************
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 **************************************************************************/
9
10 #include "drmP.h"
11 #include "drm.h"
12 #include "i915_drm.h"
13 #include "i915_drv.h"
14
15 /* This memory manager is integrated into the global/local lru
16 * mechanisms used by the clients. Specifically, it operates by
17 * setting the 'in_use' fields of the global LRU to indicate whether
18 * this region is privately allocated to a client.
19 *
20 * This does require the client to actually respect that field.
21 *
22 * Currently no effort is made to allocate 'private' memory in any
23 * clever way - the LRU information isn't used to determine which
24 * block to allocate, and the ring is drained prior to allocations --
25 * in other words allocation is expensive.
26 */
/* Mark every texture-LRU region overlapped by block 'p' as allocated
 * (in_use != 0) or free, stamp each with a fresh age, and move each to
 * the head of the LRU list shared with clients via the SAREA.
 *
 * NOTE(review): next/prev indices are read back from the shared SAREA
 * without validation; assumes start/end fall within texList bounds
 * (I915_NR_TEX_REGIONS) — TODO confirm against the heap's range.
 */
static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_tex_region_t *list;
	unsigned shift, nr;
	unsigned start;
	unsigned end;
	unsigned i;
	int age;

	shift = dev_priv->tex_lru_log_granularity;
	nr = I915_NR_TEX_REGIONS;

	/* Convert the block's byte range into inclusive LRU region indices. */
	start = p->start >> shift;
	end = (p->start + p->size - 1) >> shift;

	/* texAge is a monotonically increasing clock shared with clients. */
	age = ++sarea_priv->texAge;
	list = sarea_priv->texList;

	/* Mark the regions with the new flag and update their age.  Move
	 * them to head of list to preserve LRU semantics.
	 */
	for (i = start; i <= end; i++) {
		list[i].in_use = in_use;
		list[i].age = age;

		/* remove_from_list(i) */
		list[(unsigned)list[i].next].prev = list[i].prev;
		list[(unsigned)list[i].prev].next = list[i].next;

		/* insert_at_head(list, i): entry 'nr' is the sentinel head. */
		list[i].prev = nr;
		list[i].next = list[nr].next;
		list[(unsigned)list[nr].next].prev = i;
		list[nr].next = i;
	}
}
67
68 /* Very simple allocator for agp memory, working on a static range
69 * already mapped into each client's address space.
70 */
71
/* Carve the range [start, start + size) out of free block 'p', mark the
 * carved block as owned by 'filp', and return it.
 *
 * Up to two new blocks are allocated: one for the leading remainder (if
 * 'start' is past p->start) and one for the trailing remainder (if the
 * request is smaller than what is left).  On allocation failure the
 * code falls through to 'out', so the caller may receive a block that
 * is LARGER than requested (never NULL) — deliberate best-effort.
 */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     DRMFILE filp)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		/* 'newblock' becomes the tail part that we hand out;
		 * 'p' shrinks to cover only the leading remainder. */
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		/* 'newblock' keeps the trailing remainder as a free block. */
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->filp = filp;
	return p;
}
111
112 static struct mem_block *alloc_block(struct mem_block *heap, int size,
113 int align2, DRMFILE filp)
114 {
115 struct mem_block *p;
116 int mask = (1 << align2) - 1;
117
118 for (p = heap->next; p != heap; p = p->next) {
119 int start = (p->start + mask) & ~mask;
120 if (p->filp == NULL && start + size <= p->start + p->size)
121 return split_block(p, start, size, filp);
122 }
123
124 return NULL;
125 }
126
127 static struct mem_block *find_block(struct mem_block *heap, int start)
128 {
129 struct mem_block *p;
130
131 for (p = heap->next; p != heap; p = p->next)
132 if (p->start == start)
133 return p;
134
135 return NULL;
136 }
137
138 static void free_block(struct mem_block *p)
139 {
140 p->filp = NULL;
141
142 /* Assumes a single contiguous range. Needs a special filp in
143 * 'heap' to stop it being subsumed.
144 */
145 if (p->next->filp == NULL) {
146 struct mem_block *q = p->next;
147 p->size += q->size;
148 p->next = q->next;
149 p->next->prev = p;
150 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
151 }
152
153 if (p->prev->filp == NULL) {
154 struct mem_block *q = p->prev;
155 q->size += p->size;
156 q->next = p->next;
157 q->next->prev = q;
158 drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
159 }
160 }
161
162 /* Initialize. How to check for an uninitialized heap?
163 */
164 static int init_heap(struct mem_block **heap, int start, int size)
165 {
166 struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
167
168 if (!blocks)
169 return -ENOMEM;
170
171 *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
172 if (!*heap) {
173 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
174 return -ENOMEM;
175 }
176
177 blocks->start = start;
178 blocks->size = size;
179 blocks->filp = NULL;
180 blocks->next = blocks->prev = *heap;
181
182 memset(*heap, 0, sizeof(**heap));
183 (*heap)->filp = (DRMFILE) - 1;
184 (*heap)->next = (*heap)->prev = blocks;
185 return 0;
186 }
187
/* Free all blocks associated with the releasing file.
 *
 * Pass 1 releases ownership of every block held by 'filp' and clears
 * its regions in the shared texture LRU.  Pass 2 coalesces adjacent
 * free blocks; the sentinel's non-NULL filp terminates the merging.
 */
void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
{
	struct mem_block *p;

	/* Heap never initialized (or already torn down): nothing to do. */
	if (!heap || !heap->next)
		return;

	for (p = heap->next; p != heap; p = p->next) {
		if (p->filp == filp) {
			p->filp = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		/* Keep absorbing successors while both p and its
		 * neighbour are free, so runs of free blocks collapse
		 * into one in a single outer pass. */
		while (p->filp == NULL && p->next->filp == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}
217
218 /* Shutdown.
219 */
220 void i915_mem_takedown(struct mem_block **heap)
221 {
222 struct mem_block *p;
223
224 if (!*heap)
225 return;
226
227 for (p = (*heap)->next; p != *heap;) {
228 struct mem_block *q = p;
229 p = p->next;
230 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
231 }
232
233 drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
234 *heap = NULL;
235 }
236
237 static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
238 {
239 switch (region) {
240 case I915_MEM_REGION_AGP:
241 return &dev_priv->agp_heap;
242 default:
243 return NULL;
244 }
245 }
246
247 /* IOCTL HANDLERS */
248
249 int i915_mem_alloc(DRM_IOCTL_ARGS)
250 {
251 DRM_DEVICE;
252 drm_i915_private_t *dev_priv = dev->dev_private;
253 drm_i915_mem_alloc_t alloc;
254 struct mem_block *block, **heap;
255
256 if (!dev_priv) {
257 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
258 return DRM_ERR(EINVAL);
259 }
260
261 DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
262 sizeof(alloc));
263
264 heap = get_heap(dev_priv, alloc.region);
265 if (!heap || !*heap)
266 return DRM_ERR(EFAULT);
267
268 /* Make things easier on ourselves: all allocations at least
269 * 4k aligned.
270 */
271 if (alloc.alignment < 12)
272 alloc.alignment = 12;
273
274 block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
275
276 if (!block)
277 return DRM_ERR(ENOMEM);
278
279 mark_block(dev, block, 1);
280
281 if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
282 DRM_ERROR("copy_to_user\n");
283 return DRM_ERR(EFAULT);
284 }
285
286 return 0;
287 }
288
289 int i915_mem_free(DRM_IOCTL_ARGS)
290 {
291 DRM_DEVICE;
292 drm_i915_private_t *dev_priv = dev->dev_private;
293 drm_i915_mem_free_t memfree;
294 struct mem_block *block, **heap;
295
296 if (!dev_priv) {
297 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
298 return DRM_ERR(EINVAL);
299 }
300
301 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
302 sizeof(memfree));
303
304 heap = get_heap(dev_priv, memfree.region);
305 if (!heap || !*heap)
306 return DRM_ERR(EFAULT);
307
308 block = find_block(*heap, memfree.region_offset);
309 if (!block)
310 return DRM_ERR(EFAULT);
311
312 if (block->filp != filp)
313 return DRM_ERR(EPERM);
314
315 mark_block(dev, block, 0);
316 free_block(block);
317 return 0;
318 }
319
320 int i915_mem_init_heap(DRM_IOCTL_ARGS)
321 {
322 DRM_DEVICE;
323 drm_i915_private_t *dev_priv = dev->dev_private;
324 drm_i915_mem_init_heap_t initheap;
325 struct mem_block **heap;
326
327 if (!dev_priv) {
328 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
329 return DRM_ERR(EINVAL);
330 }
331
332 DRM_COPY_FROM_USER_IOCTL(initheap,
333 (drm_i915_mem_init_heap_t __user *) data,
334 sizeof(initheap));
335
336 heap = get_heap(dev_priv, initheap.region);
337 if (!heap)
338 return DRM_ERR(EFAULT);
339
340 if (*heap) {
341 DRM_ERROR("heap already initialized?");
342 return DRM_ERR(EFAULT);
343 }
344
345 return init_heap(heap, initheap.start, initheap.size);
346 }