/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, and so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

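/*
 * Thin wrappers around the drm_mm range manager that backs stolen memory:
 * i915_gem_stolen_insert_node() carves @size bytes (at the requested
 * alignment) out of the stolen area, returning -ENODEV if the allocator
 * has not been initialised, and i915_gem_stolen_remove_node() returns a
 * reservation to the pool.
 */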
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	return drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
				  DRM_MM_SEARCH_DEFAULT);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

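/*
 * Work out the physical base address of the stolen region for this
 * platform and reserve it against other users of the address space.
 * Returns 0 if the base cannot be determined or an unexpected resource
 * conflict is detected.
 */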
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
	 * different with the Graphics Segment immediately following Top of
	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
	 * reported by 865g, so we just use the top of memory as determined
	 * by the e820 probe.
	 *
	 * XXX However gen2 requires an unavailable symbol.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		base &= ~((1<<20) - 1);
	} else { /* GEN2 */
#if 0
		/* Stolen is immediately above Top of Memory */
		base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
		};
		u64 gtt_start, gtt_end;

		gtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			gtt_start &= PGTBL_ADDRESS_LO_MASK;
		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
			stolen[0].end = gtt_start;
		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
			stolen[1].start = gtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long) gtt_start,
				      (unsigned long long) gtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    dev_priv->gtt.stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
			base = 0;
		}
	}

	return base;
}

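/*
 * Search for a compressed framebuffer reservation that fits in stolen
 * memory: first try to over-allocate (2x the requested size), then keep
 * halving the allocation and doubling the reported compression threshold
 * until either the reservation succeeds or the hardware limit (1:4) is
 * exceeded. Returns the threshold on success, 0 on failure.
 */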
static int find_compression_threshold(struct drm_device *dev,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int compression_threshold = 1;
	int ret;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
	if (ret && INTEL_INFO(dev)->gen <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

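/*
 * Reserve the compressed framebuffer (and, on pre-ILK hardware other than
 * GM45, the compressed line-length buffer) in stolen memory and program
 * the FBC base registers to point at it.
 */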
static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int ret;

	ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size buffer. Try to increase stolen memory size if available in BIOS.\n");
	}

	dev_priv->fbc.threshold = ret;

	if (INTEL_INFO(dev_priv)->gen >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		dev_priv->fbc.compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	dev_priv->fbc.uncompressed_size = size;

	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
		      size);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
err_llb:
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

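/*
 * Entry point used by the FBC code: keep the current compressed buffer if
 * it is already large enough, otherwise release it and allocate a new one
 * for the requested size.
 */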
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	if (size <= dev_priv->fbc.uncompressed_size)
		return 0;

	/* Release any current block */
	i915_gem_stolen_cleanup_compression(dev);

	return i915_setup_compression(dev, size, fb_cpp);
}

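/*
 * Return the compressed framebuffer and, if one was allocated, the
 * compressed line-length buffer back to the stolen allocator.
 */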
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->fbc.uncompressed_size == 0)
		return;

	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);

	if (dev_priv->fbc.compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv,
					    dev_priv->fbc.compressed_llb);
		kfree(dev_priv->fbc.compressed_llb);
	}

	dev_priv->fbc.uncompressed_size = 0;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	i915_gem_stolen_cleanup_compression(dev);
	drm_mm_takedown(&dev_priv->mm.stolen);
}

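/*
 * Probe the stolen region at driver load: find its physical base, work
 * out how much of it the BIOS has reserved for itself, and initialise
 * the drm_mm allocator over the remainder. Returns 0 even when stolen
 * memory is unusable (e.g. DMAR active), in which case the allocator
 * simply stays uninitialised.
 */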
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;
	int bios_reserved = 0;

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (dev_priv->gtt.stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

	if (INTEL_INFO(dev)->gen >= 8) {
		tmp = I915_READ(GEN7_BIOS_RESERVED);
		tmp >>= GEN8_BIOS_RESERVED_SHIFT;
		tmp &= GEN8_BIOS_RESERVED_MASK;
		bios_reserved = (1024*1024) << tmp;
	} else if (IS_GEN7(dev)) {
		tmp = I915_READ(GEN7_BIOS_RESERVED);
		bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
			256*1024 : 1024*1024;
	}

	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
		return 0;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
		    bios_reserved);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > dev_priv->gtt.stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

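/*
 * Stolen objects create and pin their backing sg_table up front in
 * _i915_gem_object_create_stolen(), so the ->get_pages() hook should
 * never be reached.
 */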
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	if (obj->stolen) {
		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

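/*
 * Wrap an already reserved drm_mm_node in a GEM object: the object has no
 * shmem backing, its pages are created from the stolen range and pinned
 * for its entire lifetime.
 */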
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	obj->has_dma_mapping = true;
	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

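/*
 * Allocate a fresh range of stolen memory (4KiB aligned) and wrap it in a
 * GEM object. Returns NULL if stolen memory is uninitialised or the
 * allocation fails.
 */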
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

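/*
 * Create a GEM object for a range of stolen memory that the BIOS/firmware
 * has already populated (e.g. the boot framebuffer), reserving both the
 * exact stolen offset and, if requested, the exact GTT offset so that the
 * existing contents remain visible.
 */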
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
	    WARN_ON(stolen_offset & 4095))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_out;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->mm)) {
		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err_vma;
		}
	}

	vma->bound |= GLOBAL_BIND;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err_vma:
	i915_gem_vma_destroy(vma);
err_out:
	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	drm_gem_object_unreference(&obj->base);
	return NULL;
}