]>
Commit | Line | Data |
---|---|---|
1 | /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- | |
2 | */ | |
3 | /* | |
4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | |
5 | * All Rights Reserved. | |
6 | * | |
7 | * Permission is hereby granted, free of charge, to any person obtaining a | |
8 | * copy of this software and associated documentation files (the | |
9 | * "Software"), to deal in the Software without restriction, including | |
10 | * without limitation the rights to use, copy, modify, merge, publish, | |
11 | * distribute, sub license, and/or sell copies of the Software, and to | |
12 | * permit persons to whom the Software is furnished to do so, subject to | |
13 | * the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice (including the | |
16 | * next paragraph) shall be included in all copies or substantial portions | |
17 | * of the Software. | |
18 | * | |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | |
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | |
22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | |
23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | |
24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | |
25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
26 | * | |
27 | */ | |
28 | ||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
30 | ||
31 | #include <drm/drmP.h> | |
32 | #include <drm/drm_crtc_helper.h> | |
33 | #include <drm/drm_fb_helper.h> | |
34 | #include "intel_drv.h" | |
35 | #include <drm/i915_drm.h> | |
36 | #include "i915_drv.h" | |
37 | #include "i915_trace.h" | |
38 | #include <linux/pci.h> | |
39 | #include <linux/vgaarb.h> | |
40 | #include <linux/acpi.h> | |
41 | #include <linux/pnp.h> | |
42 | #include <linux/vga_switcheroo.h> | |
43 | #include <linux/slab.h> | |
44 | #include <acpi/video.h> | |
45 | #include <linux/pm.h> | |
46 | #include <linux/pm_runtime.h> | |
47 | ||
48 | #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) | |
49 | ||
50 | #define BEGIN_LP_RING(n) \ | |
51 | intel_ring_begin(LP_RING(dev_priv), (n)) | |
52 | ||
53 | #define OUT_RING(x) \ | |
54 | intel_ring_emit(LP_RING(dev_priv), x) | |
55 | ||
56 | #define ADVANCE_LP_RING() \ | |
57 | __intel_ring_advance(LP_RING(dev_priv)) | |
58 | ||
59 | /** | |
60 | * Lock test for when it's just for synchronization of ring access. | |
61 | * | |
62 | * In that case, we don't need to do it when GEM is initialized as nobody else | |
63 | * has access to the ring. | |
64 | */ | |
65 | #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ | |
66 | if (LP_RING(dev->dev_private)->obj == NULL) \ | |
67 | LOCK_TEST_WITH_RETURN(dev, file); \ | |
68 | } while (0) | |
69 | ||
70 | static inline u32 | |
71 | intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg) | |
72 | { | |
73 | if (I915_NEED_GFX_HWS(dev_priv->dev)) | |
74 | return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg); | |
75 | else | |
76 | return intel_read_status_page(LP_RING(dev_priv), reg); | |
77 | } | |
78 | ||
79 | #define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg) | |
80 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) | |
81 | #define I915_BREADCRUMB_INDEX 0x21 | |
82 | ||
83 | void i915_update_dri1_breadcrumb(struct drm_device *dev) | |
84 | { | |
85 | drm_i915_private_t *dev_priv = dev->dev_private; | |
86 | struct drm_i915_master_private *master_priv; | |
87 | ||
88 | /* | |
89 | * The dri breadcrumb update races against the drm master disappearing. | |
90 | * Instead of trying to fix this (this is by far not the only ums issue) | |
91 | * just don't do the update in kms mode. | |
92 | */ | |
93 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
94 | return; | |
95 | ||
96 | if (dev->primary->master) { | |
97 | master_priv = dev->primary->master->driver_priv; | |
98 | if (master_priv->sarea_priv) | |
99 | master_priv->sarea_priv->last_dispatch = | |
100 | READ_BREADCRUMB(dev_priv); | |
101 | } | |
102 | } | |
103 | ||
104 | static void i915_write_hws_pga(struct drm_device *dev) | |
105 | { | |
106 | drm_i915_private_t *dev_priv = dev->dev_private; | |
107 | u32 addr; | |
108 | ||
109 | addr = dev_priv->status_page_dmah->busaddr; | |
110 | if (INTEL_INFO(dev)->gen >= 4) | |
111 | addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; | |
112 | I915_WRITE(HWS_PGA, addr); | |
113 | } | |
114 | ||
115 | /** | |
116 | * Frees the hardware status page, whether it's a physical address or a virtual | |
117 | * address set up by the X Server. | |
118 | */ | |
119 | static void i915_free_hws(struct drm_device *dev) | |
120 | { | |
121 | drm_i915_private_t *dev_priv = dev->dev_private; | |
122 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | |
123 | ||
124 | if (dev_priv->status_page_dmah) { | |
125 | drm_pci_free(dev, dev_priv->status_page_dmah); | |
126 | dev_priv->status_page_dmah = NULL; | |
127 | } | |
128 | ||
129 | if (ring->status_page.gfx_addr) { | |
130 | ring->status_page.gfx_addr = 0; | |
131 | iounmap(dev_priv->dri1.gfx_hws_cpu_addr); | |
132 | } | |
133 | ||
134 | /* Need to rewrite hardware status page */ | |
135 | I915_WRITE(HWS_PGA, 0x1ffff000); | |
136 | } | |
137 | ||
138 | void i915_kernel_lost_context(struct drm_device * dev) | |
139 | { | |
140 | drm_i915_private_t *dev_priv = dev->dev_private; | |
141 | struct drm_i915_master_private *master_priv; | |
142 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | |
143 | ||
144 | /* | |
145 | * We should never lose context on the ring with modesetting | |
146 | * as we don't expose it to userspace | |
147 | */ | |
148 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
149 | return; | |
150 | ||
151 | ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; | |
152 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; | |
153 | ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE); | |
154 | if (ring->space < 0) | |
155 | ring->space += ring->size; | |
156 | ||
157 | if (!dev->primary->master) | |
158 | return; | |
159 | ||
160 | master_priv = dev->primary->master->driver_priv; | |
161 | if (ring->head == ring->tail && master_priv->sarea_priv) | |
162 | master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; | |
163 | } | |
164 | ||
165 | static int i915_dma_cleanup(struct drm_device * dev) | |
166 | { | |
167 | drm_i915_private_t *dev_priv = dev->dev_private; | |
168 | int i; | |
169 | ||
170 | /* Make sure interrupts are disabled here because the uninstall ioctl | |
171 | * may not have been called from userspace and after dev_private | |
172 | * is freed, it's too late. | |
173 | */ | |
174 | if (dev->irq_enabled) | |
175 | drm_irq_uninstall(dev); | |
176 | ||
177 | mutex_lock(&dev->struct_mutex); | |
178 | for (i = 0; i < I915_NUM_RINGS; i++) | |
179 | intel_cleanup_ring_buffer(&dev_priv->ring[i]); | |
180 | mutex_unlock(&dev->struct_mutex); | |
181 | ||
182 | /* Clear the HWS virtual address at teardown */ | |
183 | if (I915_NEED_GFX_HWS(dev)) | |
184 | i915_free_hws(dev); | |
185 | ||
186 | return 0; | |
187 | } | |
188 | ||
189 | static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | |
190 | { | |
191 | drm_i915_private_t *dev_priv = dev->dev_private; | |
192 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | |
193 | int ret; | |
194 | ||
195 | master_priv->sarea = drm_getsarea(dev); | |
196 | if (master_priv->sarea) { | |
197 | master_priv->sarea_priv = (drm_i915_sarea_t *) | |
198 | ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); | |
199 | } else { | |
200 | DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n"); | |
201 | } | |
202 | ||
203 | if (init->ring_size != 0) { | |
204 | if (LP_RING(dev_priv)->obj != NULL) { | |
205 | i915_dma_cleanup(dev); | |
206 | DRM_ERROR("Client tried to initialize ringbuffer in " | |
207 | "GEM mode\n"); | |
208 | return -EINVAL; | |
209 | } | |
210 | ||
211 | ret = intel_render_ring_init_dri(dev, | |
212 | init->ring_start, | |
213 | init->ring_size); | |
214 | if (ret) { | |
215 | i915_dma_cleanup(dev); | |
216 | return ret; | |
217 | } | |
218 | } | |
219 | ||
220 | dev_priv->dri1.cpp = init->cpp; | |
221 | dev_priv->dri1.back_offset = init->back_offset; | |
222 | dev_priv->dri1.front_offset = init->front_offset; | |
223 | dev_priv->dri1.current_page = 0; | |
224 | if (master_priv->sarea_priv) | |
225 | master_priv->sarea_priv->pf_current_page = 0; | |
226 | ||
227 | /* Allow hardware batchbuffers unless told otherwise. | |
228 | */ | |
229 | dev_priv->dri1.allow_batchbuffer = 1; | |
230 | ||
231 | return 0; | |
232 | } | |
233 | ||
234 | static int i915_dma_resume(struct drm_device * dev) | |
235 | { | |
236 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
237 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | |
238 | ||
239 | DRM_DEBUG_DRIVER("%s\n", __func__); | |
240 | ||
241 | if (ring->virtual_start == NULL) { | |
242 | DRM_ERROR("can not ioremap virtual address for" | |
243 | " ring buffer\n"); | |
244 | return -ENOMEM; | |
245 | } | |
246 | ||
247 | /* Program Hardware Status Page */ | |
248 | if (!ring->status_page.page_addr) { | |
249 | DRM_ERROR("Can not find hardware status page\n"); | |
250 | return -EINVAL; | |
251 | } | |
252 | DRM_DEBUG_DRIVER("hw status page @ %p\n", | |
253 | ring->status_page.page_addr); | |
254 | if (ring->status_page.gfx_addr != 0) | |
255 | intel_ring_setup_status_page(ring); | |
256 | else | |
257 | i915_write_hws_pga(dev); | |
258 | ||
259 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); | |
260 | ||
261 | return 0; | |
262 | } | |
263 | ||
264 | static int i915_dma_init(struct drm_device *dev, void *data, | |
265 | struct drm_file *file_priv) | |
266 | { | |
267 | drm_i915_init_t *init = data; | |
268 | int retcode = 0; | |
269 | ||
270 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
271 | return -ENODEV; | |
272 | ||
273 | switch (init->func) { | |
274 | case I915_INIT_DMA: | |
275 | retcode = i915_initialize(dev, init); | |
276 | break; | |
277 | case I915_CLEANUP_DMA: | |
278 | retcode = i915_dma_cleanup(dev); | |
279 | break; | |
280 | case I915_RESUME_DMA: | |
281 | retcode = i915_dma_resume(dev); | |
282 | break; | |
283 | default: | |
284 | retcode = -EINVAL; | |
285 | break; | |
286 | } | |
287 | ||
288 | return retcode; | |
289 | } | |
290 | ||
291 | /* Implement basically the same security restrictions as hardware does | |
292 | * for MI_BATCH_NON_SECURE. These can be made stricter at any time. | |
293 | * | |
294 | * Most of the calculations below involve calculating the size of a | |
295 | * particular instruction. It's important to get the size right as | |
296 | * that tells us where the next instruction to check is. Any illegal | |
297 | * instruction detected will be given a size of zero, which is a | |
298 | * signal to abort the rest of the buffer. | |
299 | */ | |
300 | static int validate_cmd(int cmd) | |
301 | { | |
302 | switch (((cmd >> 29) & 0x7)) { | |
303 | case 0x0: | |
304 | switch ((cmd >> 23) & 0x3f) { | |
305 | case 0x0: | |
306 | return 1; /* MI_NOOP */ | |
307 | case 0x4: | |
308 | return 1; /* MI_FLUSH */ | |
309 | default: | |
310 | return 0; /* disallow everything else */ | |
311 | } | |
312 | break; | |
313 | case 0x1: | |
314 | return 0; /* reserved */ | |
315 | case 0x2: | |
316 | return (cmd & 0xff) + 2; /* 2d commands */ | |
317 | case 0x3: | |
318 | if (((cmd >> 24) & 0x1f) <= 0x18) | |
319 | return 1; | |
320 | ||
321 | switch ((cmd >> 24) & 0x1f) { | |
322 | case 0x1c: | |
323 | return 1; | |
324 | case 0x1d: | |
325 | switch ((cmd >> 16) & 0xff) { | |
326 | case 0x3: | |
327 | return (cmd & 0x1f) + 2; | |
328 | case 0x4: | |
329 | return (cmd & 0xf) + 2; | |
330 | default: | |
331 | return (cmd & 0xffff) + 2; | |
332 | } | |
333 | case 0x1e: | |
334 | if (cmd & (1 << 23)) | |
335 | return (cmd & 0xffff) + 1; | |
336 | else | |
337 | return 1; | |
338 | case 0x1f: | |
339 | if ((cmd & (1 << 23)) == 0) /* inline vertices */ | |
340 | return (cmd & 0x1ffff) + 2; | |
341 | else if (cmd & (1 << 17)) /* indirect random */ | |
342 | if ((cmd & 0xffff) == 0) | |
343 | return 0; /* unknown length, too hard */ | |
344 | else | |
345 | return (((cmd & 0xffff) + 1) / 2) + 1; | |
346 | else | |
347 | return 2; /* indirect sequential */ | |
348 | default: | |
349 | return 0; | |
350 | } | |
351 | default: | |
352 | return 0; | |
353 | } | |
354 | ||
355 | return 0; | |
356 | } | |
357 | ||
358 | static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) | |
359 | { | |
360 | drm_i915_private_t *dev_priv = dev->dev_private; | |
361 | int i, ret; | |
362 | ||
363 | if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) | |
364 | return -EINVAL; | |
365 | ||
366 | for (i = 0; i < dwords;) { | |
367 | int sz = validate_cmd(buffer[i]); | |
368 | if (sz == 0 || i + sz > dwords) | |
369 | return -EINVAL; | |
370 | i += sz; | |
371 | } | |
372 | ||
373 | ret = BEGIN_LP_RING((dwords+1)&~1); | |
374 | if (ret) | |
375 | return ret; | |
376 | ||
377 | for (i = 0; i < dwords; i++) | |
378 | OUT_RING(buffer[i]); | |
379 | if (dwords & 1) | |
380 | OUT_RING(0); | |
381 | ||
382 | ADVANCE_LP_RING(); | |
383 | ||
384 | return 0; | |
385 | } | |
386 | ||
387 | int | |
388 | i915_emit_box(struct drm_device *dev, | |
389 | struct drm_clip_rect *box, | |
390 | int DR1, int DR4) | |
391 | { | |
392 | struct drm_i915_private *dev_priv = dev->dev_private; | |
393 | int ret; | |
394 | ||
395 | if (box->y2 <= box->y1 || box->x2 <= box->x1 || | |
396 | box->y2 <= 0 || box->x2 <= 0) { | |
397 | DRM_ERROR("Bad box %d,%d..%d,%d\n", | |
398 | box->x1, box->y1, box->x2, box->y2); | |
399 | return -EINVAL; | |
400 | } | |
401 | ||
402 | if (INTEL_INFO(dev)->gen >= 4) { | |
403 | ret = BEGIN_LP_RING(4); | |
404 | if (ret) | |
405 | return ret; | |
406 | ||
407 | OUT_RING(GFX_OP_DRAWRECT_INFO_I965); | |
408 | OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); | |
409 | OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); | |
410 | OUT_RING(DR4); | |
411 | } else { | |
412 | ret = BEGIN_LP_RING(6); | |
413 | if (ret) | |
414 | return ret; | |
415 | ||
416 | OUT_RING(GFX_OP_DRAWRECT_INFO); | |
417 | OUT_RING(DR1); | |
418 | OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); | |
419 | OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); | |
420 | OUT_RING(DR4); | |
421 | OUT_RING(0); | |
422 | } | |
423 | ADVANCE_LP_RING(); | |
424 | ||
425 | return 0; | |
426 | } | |
427 | ||
428 | /* XXX: Emitting the counter should really be moved to part of the IRQ | |
429 | * emit. For now, do it in both places: | |
430 | */ | |
431 | ||
432 | static void i915_emit_breadcrumb(struct drm_device *dev) | |
433 | { | |
434 | drm_i915_private_t *dev_priv = dev->dev_private; | |
435 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | |
436 | ||
437 | dev_priv->dri1.counter++; | |
438 | if (dev_priv->dri1.counter > 0x7FFFFFFFUL) | |
439 | dev_priv->dri1.counter = 0; | |
440 | if (master_priv->sarea_priv) | |
441 | master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; | |
442 | ||
443 | if (BEGIN_LP_RING(4) == 0) { | |
444 | OUT_RING(MI_STORE_DWORD_INDEX); | |
445 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | |
446 | OUT_RING(dev_priv->dri1.counter); | |
447 | OUT_RING(0); | |
448 | ADVANCE_LP_RING(); | |
449 | } | |
450 | } | |
451 | ||
452 | static int i915_dispatch_cmdbuffer(struct drm_device * dev, | |
453 | drm_i915_cmdbuffer_t *cmd, | |
454 | struct drm_clip_rect *cliprects, | |
455 | void *cmdbuf) | |
456 | { | |
457 | int nbox = cmd->num_cliprects; | |
458 | int i = 0, count, ret; | |
459 | ||
460 | if (cmd->sz & 0x3) { | |
461 | DRM_ERROR("alignment"); | |
462 | return -EINVAL; | |
463 | } | |
464 | ||
465 | i915_kernel_lost_context(dev); | |
466 | ||
467 | count = nbox ? nbox : 1; | |
468 | ||
469 | for (i = 0; i < count; i++) { | |
470 | if (i < nbox) { | |
471 | ret = i915_emit_box(dev, &cliprects[i], | |
472 | cmd->DR1, cmd->DR4); | |
473 | if (ret) | |
474 | return ret; | |
475 | } | |
476 | ||
477 | ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); | |
478 | if (ret) | |
479 | return ret; | |
480 | } | |
481 | ||
482 | i915_emit_breadcrumb(dev); | |
483 | return 0; | |
484 | } | |
485 | ||
486 | static int i915_dispatch_batchbuffer(struct drm_device * dev, | |
487 | drm_i915_batchbuffer_t * batch, | |
488 | struct drm_clip_rect *cliprects) | |
489 | { | |
490 | struct drm_i915_private *dev_priv = dev->dev_private; | |
491 | int nbox = batch->num_cliprects; | |
492 | int i, count, ret; | |
493 | ||
494 | if ((batch->start | batch->used) & 0x7) { | |
495 | DRM_ERROR("alignment"); | |
496 | return -EINVAL; | |
497 | } | |
498 | ||
499 | i915_kernel_lost_context(dev); | |
500 | ||
501 | count = nbox ? nbox : 1; | |
502 | for (i = 0; i < count; i++) { | |
503 | if (i < nbox) { | |
504 | ret = i915_emit_box(dev, &cliprects[i], | |
505 | batch->DR1, batch->DR4); | |
506 | if (ret) | |
507 | return ret; | |
508 | } | |
509 | ||
510 | if (!IS_I830(dev) && !IS_845G(dev)) { | |
511 | ret = BEGIN_LP_RING(2); | |
512 | if (ret) | |
513 | return ret; | |
514 | ||
515 | if (INTEL_INFO(dev)->gen >= 4) { | |
516 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); | |
517 | OUT_RING(batch->start); | |
518 | } else { | |
519 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); | |
520 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); | |
521 | } | |
522 | } else { | |
523 | ret = BEGIN_LP_RING(4); | |
524 | if (ret) | |
525 | return ret; | |
526 | ||
527 | OUT_RING(MI_BATCH_BUFFER); | |
528 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); | |
529 | OUT_RING(batch->start + batch->used - 4); | |
530 | OUT_RING(0); | |
531 | } | |
532 | ADVANCE_LP_RING(); | |
533 | } | |
534 | ||
535 | ||
536 | if (IS_G4X(dev) || IS_GEN5(dev)) { | |
537 | if (BEGIN_LP_RING(2) == 0) { | |
538 | OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); | |
539 | OUT_RING(MI_NOOP); | |
540 | ADVANCE_LP_RING(); | |
541 | } | |
542 | } | |
543 | ||
544 | i915_emit_breadcrumb(dev); | |
545 | return 0; | |
546 | } | |
547 | ||
548 | static int i915_dispatch_flip(struct drm_device * dev) | |
549 | { | |
550 | drm_i915_private_t *dev_priv = dev->dev_private; | |
551 | struct drm_i915_master_private *master_priv = | |
552 | dev->primary->master->driver_priv; | |
553 | int ret; | |
554 | ||
555 | if (!master_priv->sarea_priv) | |
556 | return -EINVAL; | |
557 | ||
558 | DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", | |
559 | __func__, | |
560 | dev_priv->dri1.current_page, | |
561 | master_priv->sarea_priv->pf_current_page); | |
562 | ||
563 | i915_kernel_lost_context(dev); | |
564 | ||
565 | ret = BEGIN_LP_RING(10); | |
566 | if (ret) | |
567 | return ret; | |
568 | ||
569 | OUT_RING(MI_FLUSH | MI_READ_FLUSH); | |
570 | OUT_RING(0); | |
571 | ||
572 | OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); | |
573 | OUT_RING(0); | |
574 | if (dev_priv->dri1.current_page == 0) { | |
575 | OUT_RING(dev_priv->dri1.back_offset); | |
576 | dev_priv->dri1.current_page = 1; | |
577 | } else { | |
578 | OUT_RING(dev_priv->dri1.front_offset); | |
579 | dev_priv->dri1.current_page = 0; | |
580 | } | |
581 | OUT_RING(0); | |
582 | ||
583 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); | |
584 | OUT_RING(0); | |
585 | ||
586 | ADVANCE_LP_RING(); | |
587 | ||
588 | master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++; | |
589 | ||
590 | if (BEGIN_LP_RING(4) == 0) { | |
591 | OUT_RING(MI_STORE_DWORD_INDEX); | |
592 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | |
593 | OUT_RING(dev_priv->dri1.counter); | |
594 | OUT_RING(0); | |
595 | ADVANCE_LP_RING(); | |
596 | } | |
597 | ||
598 | master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page; | |
599 | return 0; | |
600 | } | |
601 | ||
602 | static int i915_quiescent(struct drm_device *dev) | |
603 | { | |
604 | i915_kernel_lost_context(dev); | |
605 | return intel_ring_idle(LP_RING(dev->dev_private)); | |
606 | } | |
607 | ||
608 | static int i915_flush_ioctl(struct drm_device *dev, void *data, | |
609 | struct drm_file *file_priv) | |
610 | { | |
611 | int ret; | |
612 | ||
613 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
614 | return -ENODEV; | |
615 | ||
616 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | |
617 | ||
618 | mutex_lock(&dev->struct_mutex); | |
619 | ret = i915_quiescent(dev); | |
620 | mutex_unlock(&dev->struct_mutex); | |
621 | ||
622 | return ret; | |
623 | } | |
624 | ||
625 | static int i915_batchbuffer(struct drm_device *dev, void *data, | |
626 | struct drm_file *file_priv) | |
627 | { | |
628 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
629 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | |
630 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | |
631 | master_priv->sarea_priv; | |
632 | drm_i915_batchbuffer_t *batch = data; | |
633 | int ret; | |
634 | struct drm_clip_rect *cliprects = NULL; | |
635 | ||
636 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
637 | return -ENODEV; | |
638 | ||
639 | if (!dev_priv->dri1.allow_batchbuffer) { | |
640 | DRM_ERROR("Batchbuffer ioctl disabled\n"); | |
641 | return -EINVAL; | |
642 | } | |
643 | ||
644 | DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n", | |
645 | batch->start, batch->used, batch->num_cliprects); | |
646 | ||
647 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | |
648 | ||
649 | if (batch->num_cliprects < 0) | |
650 | return -EINVAL; | |
651 | ||
652 | if (batch->num_cliprects) { | |
653 | cliprects = kcalloc(batch->num_cliprects, | |
654 | sizeof(*cliprects), | |
655 | GFP_KERNEL); | |
656 | if (cliprects == NULL) | |
657 | return -ENOMEM; | |
658 | ||
659 | ret = copy_from_user(cliprects, batch->cliprects, | |
660 | batch->num_cliprects * | |
661 | sizeof(struct drm_clip_rect)); | |
662 | if (ret != 0) { | |
663 | ret = -EFAULT; | |
664 | goto fail_free; | |
665 | } | |
666 | } | |
667 | ||
668 | mutex_lock(&dev->struct_mutex); | |
669 | ret = i915_dispatch_batchbuffer(dev, batch, cliprects); | |
670 | mutex_unlock(&dev->struct_mutex); | |
671 | ||
672 | if (sarea_priv) | |
673 | sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); | |
674 | ||
675 | fail_free: | |
676 | kfree(cliprects); | |
677 | ||
678 | return ret; | |
679 | } | |
680 | ||
681 | static int i915_cmdbuffer(struct drm_device *dev, void *data, | |
682 | struct drm_file *file_priv) | |
683 | { | |
684 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
685 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | |
686 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | |
687 | master_priv->sarea_priv; | |
688 | drm_i915_cmdbuffer_t *cmdbuf = data; | |
689 | struct drm_clip_rect *cliprects = NULL; | |
690 | void *batch_data; | |
691 | int ret; | |
692 | ||
693 | DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", | |
694 | cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); | |
695 | ||
696 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
697 | return -ENODEV; | |
698 | ||
699 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | |
700 | ||
701 | if (cmdbuf->num_cliprects < 0) | |
702 | return -EINVAL; | |
703 | ||
704 | batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL); | |
705 | if (batch_data == NULL) | |
706 | return -ENOMEM; | |
707 | ||
708 | ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); | |
709 | if (ret != 0) { | |
710 | ret = -EFAULT; | |
711 | goto fail_batch_free; | |
712 | } | |
713 | ||
714 | if (cmdbuf->num_cliprects) { | |
715 | cliprects = kcalloc(cmdbuf->num_cliprects, | |
716 | sizeof(*cliprects), GFP_KERNEL); | |
717 | if (cliprects == NULL) { | |
718 | ret = -ENOMEM; | |
719 | goto fail_batch_free; | |
720 | } | |
721 | ||
722 | ret = copy_from_user(cliprects, cmdbuf->cliprects, | |
723 | cmdbuf->num_cliprects * | |
724 | sizeof(struct drm_clip_rect)); | |
725 | if (ret != 0) { | |
726 | ret = -EFAULT; | |
727 | goto fail_clip_free; | |
728 | } | |
729 | } | |
730 | ||
731 | mutex_lock(&dev->struct_mutex); | |
732 | ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); | |
733 | mutex_unlock(&dev->struct_mutex); | |
734 | if (ret) { | |
735 | DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); | |
736 | goto fail_clip_free; | |
737 | } | |
738 | ||
739 | if (sarea_priv) | |
740 | sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); | |
741 | ||
742 | fail_clip_free: | |
743 | kfree(cliprects); | |
744 | fail_batch_free: | |
745 | kfree(batch_data); | |
746 | ||
747 | return ret; | |
748 | } | |
749 | ||
750 | static int i915_emit_irq(struct drm_device * dev) | |
751 | { | |
752 | drm_i915_private_t *dev_priv = dev->dev_private; | |
753 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | |
754 | ||
755 | i915_kernel_lost_context(dev); | |
756 | ||
757 | DRM_DEBUG_DRIVER("\n"); | |
758 | ||
759 | dev_priv->dri1.counter++; | |
760 | if (dev_priv->dri1.counter > 0x7FFFFFFFUL) | |
761 | dev_priv->dri1.counter = 1; | |
762 | if (master_priv->sarea_priv) | |
763 | master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter; | |
764 | ||
765 | if (BEGIN_LP_RING(4) == 0) { | |
766 | OUT_RING(MI_STORE_DWORD_INDEX); | |
767 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | |
768 | OUT_RING(dev_priv->dri1.counter); | |
769 | OUT_RING(MI_USER_INTERRUPT); | |
770 | ADVANCE_LP_RING(); | |
771 | } | |
772 | ||
773 | return dev_priv->dri1.counter; | |
774 | } | |
775 | ||
776 | static int i915_wait_irq(struct drm_device * dev, int irq_nr) | |
777 | { | |
778 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
779 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | |
780 | int ret = 0; | |
781 | struct intel_ring_buffer *ring = LP_RING(dev_priv); | |
782 | ||
783 | DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, | |
784 | READ_BREADCRUMB(dev_priv)); | |
785 | ||
786 | if (READ_BREADCRUMB(dev_priv) >= irq_nr) { | |
787 | if (master_priv->sarea_priv) | |
788 | master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); | |
789 | return 0; | |
790 | } | |
791 | ||
792 | if (master_priv->sarea_priv) | |
793 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | |
794 | ||
795 | if (ring->irq_get(ring)) { | |
796 | DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ, | |
797 | READ_BREADCRUMB(dev_priv) >= irq_nr); | |
798 | ring->irq_put(ring); | |
799 | } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) | |
800 | ret = -EBUSY; | |
801 | ||
802 | if (ret == -EBUSY) { | |
803 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", | |
804 | READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter); | |
805 | } | |
806 | ||
807 | return ret; | |
808 | } | |
809 | ||
810 | /* Needs the lock as it touches the ring. | |
811 | */ | |
812 | static int i915_irq_emit(struct drm_device *dev, void *data, | |
813 | struct drm_file *file_priv) | |
814 | { | |
815 | drm_i915_private_t *dev_priv = dev->dev_private; | |
816 | drm_i915_irq_emit_t *emit = data; | |
817 | int result; | |
818 | ||
819 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
820 | return -ENODEV; | |
821 | ||
822 | if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { | |
823 | DRM_ERROR("called with no initialization\n"); | |
824 | return -EINVAL; | |
825 | } | |
826 | ||
827 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | |
828 | ||
829 | mutex_lock(&dev->struct_mutex); | |
830 | result = i915_emit_irq(dev); | |
831 | mutex_unlock(&dev->struct_mutex); | |
832 | ||
833 | if (copy_to_user(emit->irq_seq, &result, sizeof(int))) { | |
834 | DRM_ERROR("copy_to_user\n"); | |
835 | return -EFAULT; | |
836 | } | |
837 | ||
838 | return 0; | |
839 | } | |
840 | ||
841 | /* Doesn't need the hardware lock. | |
842 | */ | |
843 | static int i915_irq_wait(struct drm_device *dev, void *data, | |
844 | struct drm_file *file_priv) | |
845 | { | |
846 | drm_i915_private_t *dev_priv = dev->dev_private; | |
847 | drm_i915_irq_wait_t *irqwait = data; | |
848 | ||
849 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
850 | return -ENODEV; | |
851 | ||
852 | if (!dev_priv) { | |
853 | DRM_ERROR("called with no initialization\n"); | |
854 | return -EINVAL; | |
855 | } | |
856 | ||
857 | return i915_wait_irq(dev, irqwait->irq_seq); | |
858 | } | |
859 | ||
860 | static int i915_vblank_pipe_get(struct drm_device *dev, void *data, | |
861 | struct drm_file *file_priv) | |
862 | { | |
863 | drm_i915_private_t *dev_priv = dev->dev_private; | |
864 | drm_i915_vblank_pipe_t *pipe = data; | |
865 | ||
866 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
867 | return -ENODEV; | |
868 | ||
869 | if (!dev_priv) { | |
870 | DRM_ERROR("called with no initialization\n"); | |
871 | return -EINVAL; | |
872 | } | |
873 | ||
874 | pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | |
875 | ||
876 | return 0; | |
877 | } | |
878 | ||
879 | /** | |
880 | * Schedule buffer swap at given vertical blank. | |
881 | */ | |
882 | static int i915_vblank_swap(struct drm_device *dev, void *data, | |
883 | struct drm_file *file_priv) | |
884 | { | |
885 | /* The delayed swap mechanism was fundamentally racy, and has been | |
886 | * removed. The model was that the client requested a delayed flip/swap | |
887 | * from the kernel, then waited for vblank before continuing to perform | |
888 | * rendering. The problem was that the kernel might wake the client | |
889 | * up before it dispatched the vblank swap (since the lock has to be | |
890 | * held while touching the ringbuffer), in which case the client would | |
891 | * clear and start the next frame before the swap occurred, and | |
892 | * flicker would occur in addition to likely missing the vblank. | |
893 | * | |
894 | * In the absence of this ioctl, userland falls back to a correct path | |
895 | * of waiting for a vblank, then dispatching the swap on its own. | |
896 | * Context switching to userland and back is plenty fast enough for | |
897 | * meeting the requirements of vblank swapping. | |
898 | */ | |
899 | return -EINVAL; | |
900 | } | |
901 | ||
902 | static int i915_flip_bufs(struct drm_device *dev, void *data, | |
903 | struct drm_file *file_priv) | |
904 | { | |
905 | int ret; | |
906 | ||
907 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
908 | return -ENODEV; | |
909 | ||
910 | DRM_DEBUG_DRIVER("%s\n", __func__); | |
911 | ||
912 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | |
913 | ||
914 | mutex_lock(&dev->struct_mutex); | |
915 | ret = i915_dispatch_flip(dev); | |
916 | mutex_unlock(&dev->struct_mutex); | |
917 | ||
918 | return ret; | |
919 | } | |
920 | ||
921 | static int i915_getparam(struct drm_device *dev, void *data, | |
922 | struct drm_file *file_priv) | |
923 | { | |
924 | drm_i915_private_t *dev_priv = dev->dev_private; | |
925 | drm_i915_getparam_t *param = data; | |
926 | int value; | |
927 | ||
928 | if (!dev_priv) { | |
929 | DRM_ERROR("called with no initialization\n"); | |
930 | return -EINVAL; | |
931 | } | |
932 | ||
933 | switch (param->param) { | |
934 | case I915_PARAM_IRQ_ACTIVE: | |
935 | value = dev->pdev->irq ? 1 : 0; | |
936 | break; | |
937 | case I915_PARAM_ALLOW_BATCHBUFFER: | |
938 | value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; | |
939 | break; | |
940 | case I915_PARAM_LAST_DISPATCH: | |
941 | value = READ_BREADCRUMB(dev_priv); | |
942 | break; | |
943 | case I915_PARAM_CHIPSET_ID: | |
944 | value = dev->pdev->device; | |
945 | break; | |
946 | case I915_PARAM_HAS_GEM: | |
947 | value = 1; | |
948 | break; | |
949 | case I915_PARAM_NUM_FENCES_AVAIL: | |
950 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; | |
951 | break; | |
952 | case I915_PARAM_HAS_OVERLAY: | |
953 | value = dev_priv->overlay ? 1 : 0; | |
954 | break; | |
955 | case I915_PARAM_HAS_PAGEFLIPPING: | |
956 | value = 1; | |
957 | break; | |
958 | case I915_PARAM_HAS_EXECBUF2: | |
959 | /* depends on GEM */ | |
960 | value = 1; | |
961 | break; | |
962 | case I915_PARAM_HAS_BSD: | |
963 | value = intel_ring_initialized(&dev_priv->ring[VCS]); | |
964 | break; | |
965 | case I915_PARAM_HAS_BLT: | |
966 | value = intel_ring_initialized(&dev_priv->ring[BCS]); | |
967 | break; | |
968 | case I915_PARAM_HAS_VEBOX: | |
969 | value = intel_ring_initialized(&dev_priv->ring[VECS]); | |
970 | break; | |
971 | case I915_PARAM_HAS_RELAXED_FENCING: | |
972 | value = 1; | |
973 | break; | |
974 | case I915_PARAM_HAS_COHERENT_RINGS: | |
975 | value = 1; | |
976 | break; | |
977 | case I915_PARAM_HAS_EXEC_CONSTANTS: | |
978 | value = INTEL_INFO(dev)->gen >= 4; | |
979 | break; | |
980 | case I915_PARAM_HAS_RELAXED_DELTA: | |
981 | value = 1; | |
982 | break; | |
983 | case I915_PARAM_HAS_GEN7_SOL_RESET: | |
984 | value = 1; | |
985 | break; | |
986 | case I915_PARAM_HAS_LLC: | |
987 | value = HAS_LLC(dev); | |
988 | break; | |
989 | case I915_PARAM_HAS_WT: | |
990 | value = HAS_WT(dev); | |
991 | break; | |
992 | case I915_PARAM_HAS_ALIASING_PPGTT: | |
993 | value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev); | |
994 | break; | |
995 | case I915_PARAM_HAS_WAIT_TIMEOUT: | |
996 | value = 1; | |
997 | break; | |
998 | case I915_PARAM_HAS_SEMAPHORES: | |
999 | value = i915_semaphore_is_enabled(dev); | |
1000 | break; | |
1001 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: | |
1002 | value = 1; | |
1003 | break; | |
1004 | case I915_PARAM_HAS_SECURE_BATCHES: | |
1005 | value = capable(CAP_SYS_ADMIN); | |
1006 | break; | |
1007 | case I915_PARAM_HAS_PINNED_BATCHES: | |
1008 | value = 1; | |
1009 | break; | |
1010 | case I915_PARAM_HAS_EXEC_NO_RELOC: | |
1011 | value = 1; | |
1012 | break; | |
1013 | case I915_PARAM_HAS_EXEC_HANDLE_LUT: | |
1014 | value = 1; | |
1015 | break; | |
1016 | default: | |
1017 | DRM_DEBUG("Unknown parameter %d\n", param->param); | |
1018 | return -EINVAL; | |
1019 | } | |
1020 | ||
1021 | if (copy_to_user(param->value, &value, sizeof(int))) { | |
1022 | DRM_ERROR("copy_to_user failed\n"); | |
1023 | return -EFAULT; | |
1024 | } | |
1025 | ||
1026 | return 0; | |
1027 | } | |
1028 | ||
1029 | static int i915_setparam(struct drm_device *dev, void *data, | |
1030 | struct drm_file *file_priv) | |
1031 | { | |
1032 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1033 | drm_i915_setparam_t *param = data; | |
1034 | ||
1035 | if (!dev_priv) { | |
1036 | DRM_ERROR("called with no initialization\n"); | |
1037 | return -EINVAL; | |
1038 | } | |
1039 | ||
1040 | switch (param->param) { | |
1041 | case I915_SETPARAM_USE_MI_BATCHBUFFER_START: | |
1042 | break; | |
1043 | case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: | |
1044 | break; | |
1045 | case I915_SETPARAM_ALLOW_BATCHBUFFER: | |
1046 | dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0; | |
1047 | break; | |
1048 | case I915_SETPARAM_NUM_USED_FENCES: | |
1049 | if (param->value > dev_priv->num_fence_regs || | |
1050 | param->value < 0) | |
1051 | return -EINVAL; | |
1052 | /* Userspace can use first N regs */ | |
1053 | dev_priv->fence_reg_start = param->value; | |
1054 | break; | |
1055 | default: | |
1056 | DRM_DEBUG_DRIVER("unknown parameter %d\n", | |
1057 | param->param); | |
1058 | return -EINVAL; | |
1059 | } | |
1060 | ||
1061 | return 0; | |
1062 | } | |
1063 | ||
1064 | static int i915_set_status_page(struct drm_device *dev, void *data, | |
1065 | struct drm_file *file_priv) | |
1066 | { | |
1067 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1068 | drm_i915_hws_addr_t *hws = data; | |
1069 | struct intel_ring_buffer *ring; | |
1070 | ||
1071 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
1072 | return -ENODEV; | |
1073 | ||
1074 | if (!I915_NEED_GFX_HWS(dev)) | |
1075 | return -EINVAL; | |
1076 | ||
1077 | if (!dev_priv) { | |
1078 | DRM_ERROR("called with no initialization\n"); | |
1079 | return -EINVAL; | |
1080 | } | |
1081 | ||
1082 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | |
1083 | WARN(1, "tried to set status page when mode setting active\n"); | |
1084 | return 0; | |
1085 | } | |
1086 | ||
1087 | DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); | |
1088 | ||
1089 | ring = LP_RING(dev_priv); | |
1090 | ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); | |
1091 | ||
1092 | dev_priv->dri1.gfx_hws_cpu_addr = | |
1093 | ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096); | |
1094 | if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { | |
1095 | i915_dma_cleanup(dev); | |
1096 | ring->status_page.gfx_addr = 0; | |
1097 | DRM_ERROR("can not ioremap virtual address for" | |
1098 | " G33 hw status page\n"); | |
1099 | return -ENOMEM; | |
1100 | } | |
1101 | ||
1102 | memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); | |
1103 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); | |
1104 | ||
1105 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", | |
1106 | ring->status_page.gfx_addr); | |
1107 | DRM_DEBUG_DRIVER("load hws at %p\n", | |
1108 | ring->status_page.page_addr); | |
1109 | return 0; | |
1110 | } | |
1111 | ||
1112 | static int i915_get_bridge_dev(struct drm_device *dev) | |
1113 | { | |
1114 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1115 | ||
1116 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); | |
1117 | if (!dev_priv->bridge_dev) { | |
1118 | DRM_ERROR("bridge device not found\n"); | |
1119 | return -1; | |
1120 | } | |
1121 | return 0; | |
1122 | } | |
1123 | ||
1124 | #define MCHBAR_I915 0x44 | |
1125 | #define MCHBAR_I965 0x48 | |
1126 | #define MCHBAR_SIZE (4*4096) | |
1127 | ||
1128 | #define DEVEN_REG 0x54 | |
1129 | #define DEVEN_MCHBAR_EN (1 << 28) | |
1130 | ||
1131 | /* Allocate space for the MCH regs if needed, return nonzero on error */ | |
1132 | static int | |
1133 | intel_alloc_mchbar_resource(struct drm_device *dev) | |
1134 | { | |
1135 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1136 | int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | |
1137 | u32 temp_lo, temp_hi = 0; | |
1138 | u64 mchbar_addr; | |
1139 | int ret; | |
1140 | ||
1141 | if (INTEL_INFO(dev)->gen >= 4) | |
1142 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); | |
1143 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); | |
1144 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | |
1145 | ||
1146 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | |
1147 | #ifdef CONFIG_PNP | |
1148 | if (mchbar_addr && | |
1149 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) | |
1150 | return 0; | |
1151 | #endif | |
1152 | ||
1153 | /* Get some space for it */ | |
1154 | dev_priv->mch_res.name = "i915 MCHBAR"; | |
1155 | dev_priv->mch_res.flags = IORESOURCE_MEM; | |
1156 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, | |
1157 | &dev_priv->mch_res, | |
1158 | MCHBAR_SIZE, MCHBAR_SIZE, | |
1159 | PCIBIOS_MIN_MEM, | |
1160 | 0, pcibios_align_resource, | |
1161 | dev_priv->bridge_dev); | |
1162 | if (ret) { | |
1163 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); | |
1164 | dev_priv->mch_res.start = 0; | |
1165 | return ret; | |
1166 | } | |
1167 | ||
1168 | if (INTEL_INFO(dev)->gen >= 4) | |
1169 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, | |
1170 | upper_32_bits(dev_priv->mch_res.start)); | |
1171 | ||
1172 | pci_write_config_dword(dev_priv->bridge_dev, reg, | |
1173 | lower_32_bits(dev_priv->mch_res.start)); | |
1174 | return 0; | |
1175 | } | |
1176 | ||
1177 | /* Setup MCHBAR if possible, return true if we should disable it again */ | |
1178 | static void | |
1179 | intel_setup_mchbar(struct drm_device *dev) | |
1180 | { | |
1181 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1182 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | |
1183 | u32 temp; | |
1184 | bool enabled; | |
1185 | ||
1186 | dev_priv->mchbar_need_disable = false; | |
1187 | ||
1188 | if (IS_I915G(dev) || IS_I915GM(dev)) { | |
1189 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | |
1190 | enabled = !!(temp & DEVEN_MCHBAR_EN); | |
1191 | } else { | |
1192 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | |
1193 | enabled = temp & 1; | |
1194 | } | |
1195 | ||
1196 | /* If it's already enabled, don't have to do anything */ | |
1197 | if (enabled) | |
1198 | return; | |
1199 | ||
1200 | if (intel_alloc_mchbar_resource(dev)) | |
1201 | return; | |
1202 | ||
1203 | dev_priv->mchbar_need_disable = true; | |
1204 | ||
1205 | /* Space is allocated or reserved, so enable it. */ | |
1206 | if (IS_I915G(dev) || IS_I915GM(dev)) { | |
1207 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, | |
1208 | temp | DEVEN_MCHBAR_EN); | |
1209 | } else { | |
1210 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | |
1211 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); | |
1212 | } | |
1213 | } | |
1214 | ||
1215 | static void | |
1216 | intel_teardown_mchbar(struct drm_device *dev) | |
1217 | { | |
1218 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1219 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | |
1220 | u32 temp; | |
1221 | ||
1222 | if (dev_priv->mchbar_need_disable) { | |
1223 | if (IS_I915G(dev) || IS_I915GM(dev)) { | |
1224 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | |
1225 | temp &= ~DEVEN_MCHBAR_EN; | |
1226 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); | |
1227 | } else { | |
1228 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | |
1229 | temp &= ~1; | |
1230 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); | |
1231 | } | |
1232 | } | |
1233 | ||
1234 | if (dev_priv->mch_res.start) | |
1235 | release_resource(&dev_priv->mch_res); | |
1236 | } | |
1237 | ||
1238 | /* true = enable decode, false = disable decoder */ | |
1239 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | |
1240 | { | |
1241 | struct drm_device *dev = cookie; | |
1242 | ||
1243 | intel_modeset_vga_set_state(dev, state); | |
1244 | if (state) | |
1245 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | |
1246 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
1247 | else | |
1248 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
1249 | } | |
1250 | ||
1251 | static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) | |
1252 | { | |
1253 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1254 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | |
1255 | if (state == VGA_SWITCHEROO_ON) { | |
1256 | pr_info("switched on\n"); | |
1257 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | |
1258 | /* i915 resume handler doesn't set to D0 */ | |
1259 | pci_set_power_state(dev->pdev, PCI_D0); | |
1260 | i915_resume(dev); | |
1261 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | |
1262 | } else { | |
1263 | pr_err("switched off\n"); | |
1264 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | |
1265 | i915_suspend(dev, pmm); | |
1266 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; | |
1267 | } | |
1268 | } | |
1269 | ||
1270 | static bool i915_switcheroo_can_switch(struct pci_dev *pdev) | |
1271 | { | |
1272 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1273 | bool can_switch; | |
1274 | ||
1275 | spin_lock(&dev->count_lock); | |
1276 | can_switch = (dev->open_count == 0); | |
1277 | spin_unlock(&dev->count_lock); | |
1278 | return can_switch; | |
1279 | } | |
1280 | ||
1281 | static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { | |
1282 | .set_gpu_state = i915_switcheroo_set_state, | |
1283 | .reprobe = NULL, | |
1284 | .can_switch = i915_switcheroo_can_switch, | |
1285 | }; | |
1286 | ||
1287 | static int i915_load_modeset_init(struct drm_device *dev) | |
1288 | { | |
1289 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1290 | int ret; | |
1291 | ||
1292 | ret = intel_parse_bios(dev); | |
1293 | if (ret) | |
1294 | DRM_INFO("failed to find VBIOS tables\n"); | |
1295 | ||
1296 | /* If we have > 1 VGA cards, then we need to arbitrate access | |
1297 | * to the common VGA resources. | |
1298 | * | |
1299 | * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), | |
1300 | * then we do not take part in VGA arbitration and the | |
1301 | * vga_client_register() fails with -ENODEV. | |
1302 | */ | |
1303 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | |
1304 | if (ret && ret != -ENODEV) | |
1305 | goto out; | |
1306 | ||
1307 | intel_register_dsm_handler(); | |
1308 | ||
1309 | ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false); | |
1310 | if (ret) | |
1311 | goto cleanup_vga_client; | |
1312 | ||
1313 | /* Initialise stolen first so that we may reserve preallocated | |
1314 | * objects for the BIOS to KMS transition. | |
1315 | */ | |
1316 | ret = i915_gem_init_stolen(dev); | |
1317 | if (ret) | |
1318 | goto cleanup_vga_switcheroo; | |
1319 | ||
1320 | ret = drm_irq_install(dev); | |
1321 | if (ret) | |
1322 | goto cleanup_gem_stolen; | |
1323 | ||
1324 | intel_power_domains_init_hw(dev); | |
1325 | ||
1326 | /* Important: The output setup functions called by modeset_init need | |
1327 | * working irqs for e.g. gmbus and dp aux transfers. */ | |
1328 | intel_modeset_init(dev); | |
1329 | ||
1330 | ret = i915_gem_init(dev); | |
1331 | if (ret) | |
1332 | goto cleanup_power; | |
1333 | ||
1334 | INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); | |
1335 | ||
1336 | intel_modeset_gem_init(dev); | |
1337 | ||
1338 | /* Always safe in the mode setting case. */ | |
1339 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | |
1340 | dev->vblank_disable_allowed = true; | |
1341 | if (INTEL_INFO(dev)->num_pipes == 0) { | |
1342 | intel_display_power_put(dev, POWER_DOMAIN_VGA); | |
1343 | return 0; | |
1344 | } | |
1345 | ||
1346 | ret = intel_fbdev_init(dev); | |
1347 | if (ret) | |
1348 | goto cleanup_gem; | |
1349 | ||
1350 | /* Only enable hotplug handling once the fbdev is fully set up. */ | |
1351 | intel_hpd_init(dev); | |
1352 | ||
1353 | /* | |
1354 | * Some ports require correctly set-up hpd registers for detection to | |
1355 | * work properly (leading to ghost connected connector status), e.g. VGA | |
1356 | * on gm45. Hence we can only set up the initial fbdev config after hpd | |
1357 | * irqs are fully enabled. Now we should scan for the initial config | |
1358 | * only once hotplug handling is enabled, but due to screwed-up locking | |
1359 | * around kms/fbdev init we can't protect the fdbev initial config | |
1360 | * scanning against hotplug events. Hence do this first and ignore the | |
1361 | * tiny window where we will loose hotplug notifactions. | |
1362 | */ | |
1363 | intel_fbdev_initial_config(dev); | |
1364 | ||
1365 | /* Only enable hotplug handling once the fbdev is fully set up. */ | |
1366 | dev_priv->enable_hotplug_processing = true; | |
1367 | ||
1368 | drm_kms_helper_poll_init(dev); | |
1369 | ||
1370 | return 0; | |
1371 | ||
1372 | cleanup_gem: | |
1373 | mutex_lock(&dev->struct_mutex); | |
1374 | i915_gem_cleanup_ringbuffer(dev); | |
1375 | i915_gem_context_fini(dev); | |
1376 | mutex_unlock(&dev->struct_mutex); | |
1377 | WARN_ON(dev_priv->mm.aliasing_ppgtt); | |
1378 | drm_mm_takedown(&dev_priv->gtt.base.mm); | |
1379 | cleanup_power: | |
1380 | intel_display_power_put(dev, POWER_DOMAIN_VGA); | |
1381 | drm_irq_uninstall(dev); | |
1382 | cleanup_gem_stolen: | |
1383 | i915_gem_cleanup_stolen(dev); | |
1384 | cleanup_vga_switcheroo: | |
1385 | vga_switcheroo_unregister_client(dev->pdev); | |
1386 | cleanup_vga_client: | |
1387 | vga_client_register(dev->pdev, NULL, NULL, NULL); | |
1388 | out: | |
1389 | return ret; | |
1390 | } | |
1391 | ||
1392 | int i915_master_create(struct drm_device *dev, struct drm_master *master) | |
1393 | { | |
1394 | struct drm_i915_master_private *master_priv; | |
1395 | ||
1396 | master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); | |
1397 | if (!master_priv) | |
1398 | return -ENOMEM; | |
1399 | ||
1400 | master->driver_priv = master_priv; | |
1401 | return 0; | |
1402 | } | |
1403 | ||
1404 | void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | |
1405 | { | |
1406 | struct drm_i915_master_private *master_priv = master->driver_priv; | |
1407 | ||
1408 | if (!master_priv) | |
1409 | return; | |
1410 | ||
1411 | kfree(master_priv); | |
1412 | ||
1413 | master->driver_priv = NULL; | |
1414 | } | |
1415 | ||
1416 | #if IS_ENABLED(CONFIG_FB) | |
1417 | static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | |
1418 | { | |
1419 | struct apertures_struct *ap; | |
1420 | struct pci_dev *pdev = dev_priv->dev->pdev; | |
1421 | bool primary; | |
1422 | ||
1423 | ap = alloc_apertures(1); | |
1424 | if (!ap) | |
1425 | return; | |
1426 | ||
1427 | ap->ranges[0].base = dev_priv->gtt.mappable_base; | |
1428 | ap->ranges[0].size = dev_priv->gtt.mappable_end; | |
1429 | ||
1430 | primary = | |
1431 | pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | |
1432 | ||
1433 | remove_conflicting_framebuffers(ap, "inteldrmfb", primary); | |
1434 | ||
1435 | kfree(ap); | |
1436 | } | |
1437 | #else | |
1438 | static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | |
1439 | { | |
1440 | } | |
1441 | #endif | |
1442 | ||
1443 | static void i915_dump_device_info(struct drm_i915_private *dev_priv) | |
1444 | { | |
1445 | const struct intel_device_info *info = &dev_priv->info; | |
1446 | ||
1447 | #define PRINT_S(name) "%s" | |
1448 | #define SEP_EMPTY | |
1449 | #define PRINT_FLAG(name) info->name ? #name "," : "" | |
1450 | #define SEP_COMMA , | |
1451 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" | |
1452 | DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), | |
1453 | info->gen, | |
1454 | dev_priv->dev->pdev->device, | |
1455 | DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); | |
1456 | #undef PRINT_S | |
1457 | #undef SEP_EMPTY | |
1458 | #undef PRINT_FLAG | |
1459 | #undef SEP_COMMA | |
1460 | } | |
1461 | ||
1462 | /* | |
1463 | * Determine various intel_device_info fields at runtime. | |
1464 | * | |
1465 | * Use it when either: | |
1466 | * - it's judged too laborious to fill n static structures with the limit | |
1467 | * when a simple if statement does the job, | |
1468 | * - run-time checks (eg read fuse/strap registers) are needed. | |
1469 | * | |
1470 | * This function needs to be called: | |
1471 | * - after the MMIO has been setup as we are reading registers, | |
1472 | * - after the PCH has been detected, | |
1473 | * - before the first usage of the fields it can tweak. | |
1474 | */ | |
1475 | static void intel_device_info_runtime_init(struct drm_device *dev) | |
1476 | { | |
1477 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1478 | struct intel_device_info *info; | |
1479 | ||
1480 | info = (struct intel_device_info *)&dev_priv->info; | |
1481 | ||
1482 | info->num_sprites = 1; | |
1483 | if (IS_VALLEYVIEW(dev)) | |
1484 | info->num_sprites = 2; | |
1485 | ||
1486 | if (i915.disable_display) { | |
1487 | DRM_INFO("Display disabled (module parameter)\n"); | |
1488 | info->num_pipes = 0; | |
1489 | } else if (info->num_pipes > 0 && | |
1490 | (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && | |
1491 | !IS_VALLEYVIEW(dev)) { | |
1492 | u32 fuse_strap = I915_READ(FUSE_STRAP); | |
1493 | u32 sfuse_strap = I915_READ(SFUSE_STRAP); | |
1494 | ||
1495 | /* | |
1496 | * SFUSE_STRAP is supposed to have a bit signalling the display | |
1497 | * is fused off. Unfortunately it seems that, at least in | |
1498 | * certain cases, fused off display means that PCH display | |
1499 | * reads don't land anywhere. In that case, we read 0s. | |
1500 | * | |
1501 | * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK | |
1502 | * should be set when taking over after the firmware. | |
1503 | */ | |
1504 | if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || | |
1505 | sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || | |
1506 | (dev_priv->pch_type == PCH_CPT && | |
1507 | !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { | |
1508 | DRM_INFO("Display fused off, disabling\n"); | |
1509 | info->num_pipes = 0; | |
1510 | } | |
1511 | } | |
1512 | } | |
1513 | ||
1514 | /** | |
1515 | * i915_driver_load - setup chip and create an initial config | |
1516 | * @dev: DRM device | |
1517 | * @flags: startup flags | |
1518 | * | |
1519 | * The driver load routine has to do several things: | |
1520 | * - drive output discovery via intel_modeset_init() | |
1521 | * - initialize the memory manager | |
1522 | * - allocate initial config memory | |
1523 | * - setup the DRM framebuffer with the allocated memory | |
1524 | */ | |
1525 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | |
1526 | { | |
1527 | struct drm_i915_private *dev_priv; | |
1528 | struct intel_device_info *info, *device_info; | |
1529 | int ret = 0, mmio_bar, mmio_size; | |
1530 | uint32_t aperture_size; | |
1531 | ||
1532 | info = (struct intel_device_info *) flags; | |
1533 | ||
1534 | /* Refuse to load on gen6+ without kms enabled. */ | |
1535 | if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) { | |
1536 | DRM_INFO("Your hardware requires kernel modesetting (KMS)\n"); | |
1537 | DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n"); | |
1538 | return -ENODEV; | |
1539 | } | |
1540 | ||
1541 | /* UMS needs agp support. */ | |
1542 | if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp) | |
1543 | return -EINVAL; | |
1544 | ||
1545 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); | |
1546 | if (dev_priv == NULL) | |
1547 | return -ENOMEM; | |
1548 | ||
1549 | dev->dev_private = (void *)dev_priv; | |
1550 | dev_priv->dev = dev; | |
1551 | ||
1552 | /* copy initial configuration to dev_priv->info */ | |
1553 | device_info = (struct intel_device_info *)&dev_priv->info; | |
1554 | *device_info = *info; | |
1555 | ||
1556 | spin_lock_init(&dev_priv->irq_lock); | |
1557 | spin_lock_init(&dev_priv->gpu_error.lock); | |
1558 | spin_lock_init(&dev_priv->backlight_lock); | |
1559 | spin_lock_init(&dev_priv->uncore.lock); | |
1560 | spin_lock_init(&dev_priv->mm.object_stat_lock); | |
1561 | mutex_init(&dev_priv->dpio_lock); | |
1562 | mutex_init(&dev_priv->modeset_restore_lock); | |
1563 | ||
1564 | intel_pm_setup(dev); | |
1565 | ||
1566 | intel_display_crc_init(dev); | |
1567 | ||
1568 | i915_dump_device_info(dev_priv); | |
1569 | ||
1570 | /* Not all pre-production machines fall into this category, only the | |
1571 | * very first ones. Almost everything should work, except for maybe | |
1572 | * suspend/resume. And we don't implement workarounds that affect only | |
1573 | * pre-production machines. */ | |
1574 | if (IS_HSW_EARLY_SDV(dev)) | |
1575 | DRM_INFO("This is an early pre-production Haswell machine. " | |
1576 | "It may not be fully functional.\n"); | |
1577 | ||
1578 | if (i915_get_bridge_dev(dev)) { | |
1579 | ret = -EIO; | |
1580 | goto free_priv; | |
1581 | } | |
1582 | ||
1583 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | |
1584 | /* Before gen4, the registers and the GTT are behind different BARs. | |
1585 | * However, from gen4 onwards, the registers and the GTT are shared | |
1586 | * in the same BAR, so we want to restrict this ioremap from | |
1587 | * clobbering the GTT which we want ioremap_wc instead. Fortunately, | |
1588 | * the register BAR remains the same size for all the earlier | |
1589 | * generations up to Ironlake. | |
1590 | */ | |
1591 | if (info->gen < 5) | |
1592 | mmio_size = 512*1024; | |
1593 | else | |
1594 | mmio_size = 2*1024*1024; | |
1595 | ||
1596 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); | |
1597 | if (!dev_priv->regs) { | |
1598 | DRM_ERROR("failed to map registers\n"); | |
1599 | ret = -EIO; | |
1600 | goto put_bridge; | |
1601 | } | |
1602 | ||
1603 | intel_uncore_early_sanitize(dev); | |
1604 | ||
1605 | /* This must be called before any calls to HAS_PCH_* */ | |
1606 | intel_detect_pch(dev); | |
1607 | ||
1608 | intel_uncore_init(dev); | |
1609 | ||
1610 | ret = i915_gem_gtt_init(dev); | |
1611 | if (ret) | |
1612 | goto out_regs; | |
1613 | ||
1614 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
1615 | i915_kick_out_firmware_fb(dev_priv); | |
1616 | ||
1617 | pci_set_master(dev->pdev); | |
1618 | ||
1619 | /* overlay on gen2 is broken and can't address above 1G */ | |
1620 | if (IS_GEN2(dev)) | |
1621 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | |
1622 | ||
1623 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) | |
1624 | * using 32bit addressing, overwriting memory if HWS is located | |
1625 | * above 4GB. | |
1626 | * | |
1627 | * The documentation also mentions an issue with undefined | |
1628 | * behaviour if any general state is accessed within a page above 4GB, | |
1629 | * which also needs to be handled carefully. | |
1630 | */ | |
1631 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | |
1632 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | |
1633 | ||
1634 | aperture_size = dev_priv->gtt.mappable_end; | |
1635 | ||
1636 | dev_priv->gtt.mappable = | |
1637 | io_mapping_create_wc(dev_priv->gtt.mappable_base, | |
1638 | aperture_size); | |
1639 | if (dev_priv->gtt.mappable == NULL) { | |
1640 | ret = -EIO; | |
1641 | goto out_gtt; | |
1642 | } | |
1643 | ||
1644 | dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, | |
1645 | aperture_size); | |
1646 | ||
1647 | /* The i915 workqueue is primarily used for batched retirement of | |
1648 | * requests (and thus managing bo) once the task has been completed | |
1649 | * by the GPU. i915_gem_retire_requests() is called directly when we | |
1650 | * need high-priority retirement, such as waiting for an explicit | |
1651 | * bo. | |
1652 | * | |
1653 | * It is also used for periodic low-priority events, such as | |
1654 | * idle-timers and recording error state. | |
1655 | * | |
1656 | * All tasks on the workqueue are expected to acquire the dev mutex | |
1657 | * so there is no point in running more than one instance of the | |
1658 | * workqueue at any time. Use an ordered one. | |
1659 | */ | |
1660 | dev_priv->wq = alloc_ordered_workqueue("i915", 0); | |
1661 | if (dev_priv->wq == NULL) { | |
1662 | DRM_ERROR("Failed to create our workqueue.\n"); | |
1663 | ret = -ENOMEM; | |
1664 | goto out_mtrrfree; | |
1665 | } | |
1666 | ||
1667 | intel_irq_init(dev); | |
1668 | intel_uncore_sanitize(dev); | |
1669 | ||
1670 | /* Try to make sure MCHBAR is enabled before poking at it */ | |
1671 | intel_setup_mchbar(dev); | |
1672 | intel_setup_gmbus(dev); | |
1673 | intel_opregion_setup(dev); | |
1674 | ||
1675 | intel_setup_bios(dev); | |
1676 | ||
1677 | i915_gem_load(dev); | |
1678 | ||
1679 | /* On the 945G/GM, the chipset reports the MSI capability on the | |
1680 | * integrated graphics even though the support isn't actually there | |
1681 | * according to the published specs. It doesn't appear to function | |
1682 | * correctly in testing on 945G. | |
1683 | * This may be a side effect of MSI having been made available for PEG | |
1684 | * and the registers being closely associated. | |
1685 | * | |
1686 | * According to chipset errata, on the 965GM, MSI interrupts may | |
1687 | * be lost or delayed, but we use them anyway to avoid | |
1688 | * stuck interrupts on some machines. | |
1689 | */ | |
1690 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | |
1691 | pci_enable_msi(dev->pdev); | |
1692 | ||
1693 | intel_device_info_runtime_init(dev); | |
1694 | ||
1695 | if (INTEL_INFO(dev)->num_pipes) { | |
1696 | ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); | |
1697 | if (ret) | |
1698 | goto out_gem_unload; | |
1699 | } | |
1700 | ||
1701 | intel_power_domains_init(dev); | |
1702 | ||
1703 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | |
1704 | ret = i915_load_modeset_init(dev); | |
1705 | if (ret < 0) { | |
1706 | DRM_ERROR("failed to init modeset\n"); | |
1707 | goto out_power_well; | |
1708 | } | |
1709 | } else { | |
1710 | /* Start out suspended in ums mode. */ | |
1711 | dev_priv->ums.mm_suspended = 1; | |
1712 | } | |
1713 | ||
1714 | i915_setup_sysfs(dev); | |
1715 | ||
1716 | if (INTEL_INFO(dev)->num_pipes) { | |
1717 | /* Must be done after probing outputs */ | |
1718 | intel_opregion_init(dev); | |
1719 | acpi_video_register(); | |
1720 | } | |
1721 | ||
1722 | if (IS_GEN5(dev)) | |
1723 | intel_gpu_ips_init(dev_priv); | |
1724 | ||
1725 | intel_init_runtime_pm(dev_priv); | |
1726 | ||
1727 | return 0; | |
1728 | ||
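	/*
	 * Error unwind: the labels below release, roughly in reverse order,
	 * whatever was set up before the failing step.
	 */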
1729 | out_power_well: | |
1730 | intel_power_domains_remove(dev); | |
1731 | drm_vblank_cleanup(dev); | |
1732 | out_gem_unload: | |
1733 | if (dev_priv->mm.inactive_shrinker.scan_objects) | |
1734 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | |
1735 | ||
1736 | if (dev->pdev->msi_enabled) | |
1737 | pci_disable_msi(dev->pdev); | |
1738 | ||
1739 | intel_teardown_gmbus(dev); | |
1740 | intel_teardown_mchbar(dev); | |
1741 | pm_qos_remove_request(&dev_priv->pm_qos); | |
1742 | destroy_workqueue(dev_priv->wq); | |
1743 | out_mtrrfree: | |
1744 | arch_phys_wc_del(dev_priv->gtt.mtrr); | |
1745 | io_mapping_free(dev_priv->gtt.mappable); | |
1746 | out_gtt: | |
1747 | list_del(&dev_priv->gtt.base.global_link); | |
1748 | drm_mm_takedown(&dev_priv->gtt.base.mm); | |
1749 | dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); | |
1750 | out_regs: | |
1751 | intel_uncore_fini(dev); | |
1752 | pci_iounmap(dev->pdev, dev_priv->regs); | |
1753 | put_bridge: | |
1754 | pci_dev_put(dev_priv->bridge_dev); | |
1755 | free_priv: | |
1756 | if (dev_priv->slab) | |
1757 | kmem_cache_destroy(dev_priv->slab); | |
1758 | kfree(dev_priv); | |
1759 | return ret; | |
1760 | } | |
1761 | ||
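/*
 * i915_driver_unload - tear down the state set up by i915_driver_load
 * @dev: DRM device
 *
 * Idles the GPU, disables runtime PM, unregisters the userspace-visible
 * interfaces and releases the MMIO mapping, GTT and private driver state.
 */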
1762 | int i915_driver_unload(struct drm_device *dev) | |
1763 | { | |
1764 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1765 | int ret; | |
1766 | ||
1767 | ret = i915_gem_suspend(dev); | |
1768 | if (ret) { | |
1769 | DRM_ERROR("failed to idle hardware: %d\n", ret); | |
1770 | return ret; | |
1771 | } | |
1772 | ||
1773 | intel_fini_runtime_pm(dev_priv); | |
1774 | ||
1775 | intel_gpu_ips_teardown(); | |
1776 | ||
1777 | /* The i915.ko module is still not prepared to be loaded when | |
1778 | * the power well is not enabled, so just enable it in case | |
1779 | * we're going to unload/reload. */ | |
1780 | intel_display_set_init_power(dev, true); | |
1781 | intel_power_domains_remove(dev); | |
1782 | ||
1783 | i915_teardown_sysfs(dev); | |
1784 | ||
1785 | if (dev_priv->mm.inactive_shrinker.scan_objects) | |
1786 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | |
1787 | ||
1788 | io_mapping_free(dev_priv->gtt.mappable); | |
1789 | arch_phys_wc_del(dev_priv->gtt.mtrr); | |
1790 | ||
1791 | acpi_video_unregister(); | |
1792 | ||
1793 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | |
1794 | intel_fbdev_fini(dev); | |
1795 | intel_modeset_cleanup(dev); | |
1796 | cancel_work_sync(&dev_priv->console_resume_work); | |
1797 | ||
1798 | /* | |
1799 | * free the memory space allocated for the child device | |
1800 | * config parsed from VBT | |
1801 | */ | |
1802 | if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { | |
1803 | kfree(dev_priv->vbt.child_dev); | |
1804 | dev_priv->vbt.child_dev = NULL; | |
1805 | dev_priv->vbt.child_dev_num = 0; | |
1806 | } | |
1807 | ||
1808 | vga_switcheroo_unregister_client(dev->pdev); | |
1809 | vga_client_register(dev->pdev, NULL, NULL, NULL); | |
1810 | } | |
1811 | ||
1812 | /* Free error state after interrupts are fully disabled. */ | |
1813 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); | |
1814 | cancel_work_sync(&dev_priv->gpu_error.work); | |
1815 | i915_destroy_error_state(dev); | |
1816 | ||
1817 | cancel_delayed_work_sync(&dev_priv->pc8.enable_work); | |
1818 | ||
1819 | if (dev->pdev->msi_enabled) | |
1820 | pci_disable_msi(dev->pdev); | |
1821 | ||
1822 | intel_opregion_fini(dev); | |
1823 | ||
1824 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | |
1825 | /* Flush any outstanding unpin_work. */ | |
1826 | flush_workqueue(dev_priv->wq); | |
1827 | ||
1828 | mutex_lock(&dev->struct_mutex); | |
1829 | i915_gem_free_all_phys_object(dev); | |
1830 | i915_gem_cleanup_ringbuffer(dev); | |
1831 | i915_gem_context_fini(dev); | |
1832 | WARN_ON(dev_priv->mm.aliasing_ppgtt); | |
1833 | mutex_unlock(&dev->struct_mutex); | |
1834 | i915_gem_cleanup_stolen(dev); | |
1835 | ||
1836 | if (!I915_NEED_GFX_HWS(dev)) | |
1837 | i915_free_hws(dev); | |
1838 | } | |
1839 | ||
1840 | list_del(&dev_priv->gtt.base.global_link); | |
1841 | WARN_ON(!list_empty(&dev_priv->vm_list)); | |
1842 | ||
1843 | drm_vblank_cleanup(dev); | |
1844 | ||
1845 | intel_teardown_gmbus(dev); | |
1846 | intel_teardown_mchbar(dev); | |
1847 | ||
1848 | destroy_workqueue(dev_priv->wq); | |
1849 | pm_qos_remove_request(&dev_priv->pm_qos); | |
1850 | ||
1851 | dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); | |
1852 | ||
1853 | intel_uncore_fini(dev); | |
1854 | if (dev_priv->regs != NULL) | |
1855 | pci_iounmap(dev->pdev, dev_priv->regs); | |
1856 | ||
1857 | if (dev_priv->slab) | |
1858 | kmem_cache_destroy(dev_priv->slab); | |
1859 | ||
1860 | pci_dev_put(dev_priv->bridge_dev); | |
1861 | kfree(dev->dev_private); | |
1862 | ||
1863 | return 0; | |
1864 | } | |
1865 | ||
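/*
 * i915_driver_open - called for each new open() of the device node
 * @dev: DRM device
 * @file: DRM file private of the new client
 *
 * Sets up the per-client GEM state via i915_gem_open().
 */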
1866 | int i915_driver_open(struct drm_device *dev, struct drm_file *file) | |
1867 | { | |
1868 | int ret; | |
1869 | ||
1870 | ret = i915_gem_open(dev, file); | |
1871 | if (ret) | |
1872 | return ret; | |
1873 | ||
1874 | return 0; | |
1875 | } | |
1876 | ||
1877 | /** | |
1878 | * i915_driver_lastclose - clean up after all DRM clients have exited | |
1879 | * @dev: DRM device | |
1880 | * | |
1881 | * Take care of cleaning up after all DRM clients have exited. In the | |
1882 | * mode setting case, we want to restore the kernel's initial mode (just | |
1883 | * in case the last client left us in a bad state). | |
1884 | * | |
1885 | * Additionally, in the non-mode setting case, we'll tear down the GTT | |
1886 | * and DMA structures, since the kernel won't be using them, and clean | |
1887 | * up any GEM state. | |
1888 | */ | |
1889 | void i915_driver_lastclose(struct drm_device *dev) | |
1890 | { | |
1891 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1892 | ||
1893 | /* On gen6+ we refuse to init without kms enabled, but then the drm core | |
1894 | * goes right around and calls lastclose. Check for this and don't clean | |
1895 | * up anything. */ | |
1896 | if (!dev_priv) | |
1897 | return; | |
1898 | ||
1899 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | |
1900 | intel_fbdev_restore_mode(dev); | |
1901 | vga_switcheroo_process_delayed_switch(); | |
1902 | return; | |
1903 | } | |
1904 | ||
1905 | i915_gem_lastclose(dev); | |
1906 | ||
1907 | i915_dma_cleanup(dev); | |
1908 | } | |
1909 | ||
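/*
 * i915_driver_preclose - called when a client file descriptor is closed
 * @dev: DRM device
 * @file_priv: DRM file of the departing client
 *
 * Drops the client's GEM contexts and releases its outstanding request
 * bookkeeping under struct_mutex.
 */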
1910 | void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) | |
1911 | { | |
1912 | mutex_lock(&dev->struct_mutex); | |
1913 | i915_gem_context_close(dev, file_priv); | |
1914 | i915_gem_release(dev, file_priv); | |
1915 | mutex_unlock(&dev->struct_mutex); | |
1916 | } | |
1917 | ||
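/*
 * i915_driver_postclose - final per-client cleanup after the file is closed
 * @dev: DRM device
 * @file: DRM file being destroyed
 *
 * Frees the per-client private state allocated in i915_gem_open().
 */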
1918 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) | |
1919 | { | |
1920 | struct drm_i915_file_private *file_priv = file->driver_priv; | |
1921 | ||
1922 | kfree(file_priv); | |
1923 | } | |
1924 | ||
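/*
 * Ioctl dispatch table: maps each I915_* ioctl to its handler and access
 * flags. The legacy DRI1/UMS entries sit alongside the GEM and KMS ioctls;
 * entries marked DRM_RENDER_ALLOW are also reachable through render nodes.
 */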
1925 | const struct drm_ioctl_desc i915_ioctls[] = { | |
1926 | DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
1927 | DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), | |
1928 | DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), | |
1929 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), | |
1930 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), | |
1931 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), | |
1932 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), | |
1933 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
1934 | DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), | |
1935 | DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), | |
1936 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
1937 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), | |
1938 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
1939 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
1940 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), | |
1941 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), | |
1942 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
1943 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | |
1944 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), | |
1945 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1946 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | |
1947 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | |
1948 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1949 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1950 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1951 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1952 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | |
1953 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | |
1954 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1955 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1956 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1957 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1958 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1959 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1960 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1961 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1962 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1963 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1964 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), | |
1965 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1966 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | |
1967 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | |
1968 | DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | |
1969 | DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | |
1970 | DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1971 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1972 | DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1973 | DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1974 | DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), | |
1975 | }; | |
1976 | ||
1977 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | |
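As a rough illustration of how this table is reached from userspace, here is a
minimal sketch (not part of the driver) that issues one of the GEM ioctls
through libdrm; the device node path is an assumption for the example.

/* Userspace sketch: query the chipset id via DRM_IOCTL_I915_GETPARAM,
 * which the table above routes to i915_getparam(). Build against libdrm. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* example device node */
	int chipset_id = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = &chipset_id,
	};

	if (fd < 0)
		return 1;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("chipset id: 0x%04x\n", chipset_id);
	close(fd);
	return 0;
}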
1978 | ||
1979 | /* | |
1980 | * This is really ugly: because old userspace abused the Linux AGP interface to | |
1981 | * manage the GTT, we need to claim that all Intel devices are AGP devices; | |
1982 | * otherwise the drm core refuses to initialize the AGP support code. | |
1983 | */ | |
1984 | int i915_driver_device_is_agp(struct drm_device *dev) | |
1985 | { | |
1986 | return 1; | |
1987 | } |