/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
	POWER_DOMAIN_VGA,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)

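/*
 * Illustrative note (not itself part of the driver API): the helpers above
 * are plain enum arithmetic, so for example
 *
 *	POWER_DOMAIN_PIPE(PIPE_B) == POWER_DOMAIN_PIPE_B
 *	POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) == POWER_DOMAIN_TRANSCODER_EDP
 *
 * which is why the ordering of enum intel_display_power_domain matters.
 */
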
enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

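/*
 * Usage sketch for the iterators above (illustrative only; it assumes local
 * "dev", "crtc" and encoder variables, just as the macros themselves do):
 *
 *	enum pipe pipe;
 *	struct intel_encoder *encoder;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("probing pipe %c\n", pipe_name(pipe));
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		DRM_DEBUG_KMS("encoder found on this crtc\n");
 */
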
struct drm_i915_private;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

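/*
 * Rough sketch of what intel_link_compute_m_n() fills in (the real
 * implementation lives in intel_display.c): the "data" pair encodes the
 * ratio of pixel bandwidth to link bandwidth and the "link" pair the ratio
 * of pixel clock to link clock, approximately
 *
 *	gmch_m / gmch_n ~ (bpp * pixel_clock) / (nlanes * 8 * link_clock)
 *	link_m / link_n ~ pixel_clock / link_clock
 *
 * with both ratios reduced to fit the hardware M/N register fields.
 */
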
struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0
#define WATCH_GTT	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;
	u32 *active_bo_count, *pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	int hangcheck_score[I915_NUM_RINGS];
	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};

struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  uint32_t flags);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
				uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
				uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
				uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
				uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	unsigned forcewake_count;

	struct delayed_work force_wake_work;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

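/*
 * Expansion sketch (illustrative, abbreviated): inside struct
 * intel_device_info the line
 *
 *	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
 *
 * expands to
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...
 *	u8 has_fpga_dbg:1;
 *
 * The same flag list can be re-expanded elsewhere with a different func/sep
 * pair, e.g. to print every flag without repeating the names.
 */
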
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

typedef uint32_t gen6_gtt_pte_t;

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start; /* Start offset always 0 for dri2 */
	size_t total; /* size addr space maps (ex. 2GB for ggtt) */

	struct {
		dma_addr_t addr;
		struct page *page;
	} scratch;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level);
	void (*clear_range)(struct i915_address_space *vm,
			    unsigned int first_entry,
			    unsigned int num_entries);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       unsigned int first_entry,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size; /* Total size of stolen memory */

	unsigned long mappable_end; /* End offset that we can CPU map */
	struct io_mapping *mappable; /* Mapping to our CPU mappable region */
	phys_addr_t mappable_base; /* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

struct i915_hw_ppgtt {
	struct i915_address_space base;
	unsigned num_pd_entries;
	struct page **pt_pages;
	uint32_t pd_offset;
	dma_addr_t *pt_dma_addr;

	int (*enable)(struct drm_device *dev);
};

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;

	struct list_head link;
};

struct i915_fbc {
	unsigned long size;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
		int interval;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

struct i915_psr {
	bool sink_support;
	bool source_ok;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	/* The below variables and all the rps hw state are protected by
	 * dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 rp1_delay;
	u8 rp0_delay;
	u8 hw_max;

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

/* Power well structure for haswell */
struct i915_power_well {
	struct drm_device *device;
	spinlock_t lock;
	/* power well enable/disable usage count */
	int count;
	int i915_request;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long missed_irq_rings;

	/**
	 * State variable and reset counter controlling the reset flow
	 *
	 * Upper bits are for the reset counter. This counter is used by the
	 * wait_seqno code to notice, free of races, that a reset event
	 * happened and that it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 *
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	 * that happens.
	 */
	atomic_t reset_counter;

	/**
	 * Special values/flags for reset_counter
	 *
	 * Note that the code relies on
	 * 	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
	 * being true.
	 */
#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			0xffffffff

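	/*
	 * Illustrative check of the bits above (a sketch; the driver has
	 * inline helpers along these lines further down in this header):
	 *
	 *	if (atomic_read(&error->reset_counter) &
	 *	    (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED))
	 *		...a reset is in progress or the GPU is wedged...
	 *
	 * Note that I915_WEDGED (all bits set) deliberately contains
	 * I915_RESET_IN_PROGRESS_FLAG, which is what the "code relies on"
	 * comment above refers to.
	 */
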
	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	/* MIPI DSI */
	struct {
		u16 panel_id;
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct hsw_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

/*
 * This struct tracks the state needed for the Package C8+ feature.
 *
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states.
 *
 * Our driver only allows PC8+ when all the outputs are disabled, the power well
 * is disabled and the GPU is idle. When these conditions are met, we manually
 * take care of the remaining conditions: disable the interrupts and clocks, and
 * switch the LCPLL refclk to Fclk.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6.
 *
 * The interrupt disabling is part of the requirements. We can only leave the
 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
 * can lock up the machine.
 *
 * Ideally every piece of our code that needs PC8+ disabled would call
 * hsw_disable_package_c8, which would increment disable_count and prevent the
 * system from reaching PC8+. But we don't have a symmetric way to do this for
 * everything, so we have the requirements_met and gpu_idle variables. When we
 * switch requirements_met or gpu_idle to true we decrease disable_count, and
 * increase it in the opposite case. The requirements_met variable is true when
 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
 * variable is true when the GPU is idle.
 *
 * In addition to everything, we only actually enable PC8+ if disable_count
 * stays at zero for at least some seconds. This is implemented with the
 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
 * consecutive times when all screens are disabled and some background app
 * queries the state of our connectors, or we have some application constantly
 * waking up to use the GPU. Only after the enable_work function actually
 * enables PC8+ does the "enabled" variable become true, which means that it
 * can be false even if disable_count is 0.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens, but if it actually happens we'll also update the variables
 * inside struct regsave so when we restore the IRQs they will contain the
 * latest expected values.
 *
 * For more, read "Display Sequences for Package C8" in our documentation.
 */
struct i915_package_c8 {
	bool requirements_met;
	bool gpu_idle;
	bool irqs_disabled;
	/* Only true after the delayed work task actually enables it. */
	bool enabled;
	int disable_count;
	struct mutex lock;
	struct delayed_work enable_work;

	struct {
		uint32_t deimr;
		uint32_t sdeimr;
		uint32_t gtimr;
		uint32_t gtier;
		uint32_t gen6_pmimr;
	} regsave;
};
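
/*
 * Usage sketch for the PC8+ accounting described above (illustrative only):
 * code that cannot tolerate PC8+ brackets its critical section with the
 * disable/enable pair named in the comment, e.g.
 *
 *	hsw_disable_package_c8(dev_priv);
 *	...touch state that is lost in PC8+...
 *	hsw_enable_package_c8(dev_priv);
 *
 * so that disable_count stays non-zero for as long as any such section is
 * active.
 */
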
enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	atomic_t available; /* exclusive access to the device */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	atomic_t head, tail;
	wait_queue_head_t wq;
};

typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask;
	u32 gt_irq_mask;
	u32 pm_irq_mask;

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct timer_list hotplug_reenable_timer;

	int num_plane;

	struct i915_fbc fbc;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;
	unsigned int sprite_scaling_enabled;

	/* backlight */
	struct {
		int level;
		bool enabled;
		spinlock_t lock; /* bl registers and the above bl fields */
		struct backlight_device *device;
	} backlight;

	/* LVDS info */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VMA representing the global address space */

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	/* Haswell power well */
	struct i915_power_well power_well;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_I915_FBDEV
	/* list of fbdev registered on this device */
	struct intel_fbdev *fbdev;
#endif

	/*
	 * The console may be contended at resume, but we don't
	 * want it to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	struct i915_suspend_saved_registers regfile;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];

		/* current hardware state */
		struct hsw_wm_values hw;
	} wm;

	struct i915_package_c8 pc8;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif
} drm_i915_private_t;

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

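/*
 * Typical use of to_i915() (illustrative): most functions in the driver
 * start with
 *
 *	struct drm_i915_private *dev_priv = to_i915(dev);
 *
 * or reach dev->dev_private directly via the drm_i915_private_t typedef.
 */
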
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))

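/*
 * Usage sketch for for_each_ring() (illustrative only; assumes a local
 * dev_priv):
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG_DRIVER("ring %d: %s\n", i, ring->name);
 *
 * Rings that were never initialised on this platform are skipped by the
 * intel_ring_initialized() check inside the macro.
 */
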
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
};

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	struct list_head ring_list;
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:3;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

	struct sg_table *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Context related to this request */
	struct i915_hw_context *ctx;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;

	struct {
		spinlock_t lock;
		struct list_head request_list;
		struct delayed_work idle_work;
	} mm;
	struct idr context_idr;

	struct i915_ctx_hang_stats hang_stats;
	atomic_t rps_wait_boost;
};

2c1792a1 1668#define INTEL_INFO(dev) (to_i915(dev)->info)
cae5852d 1669
ffbab09b
VS
1670#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
1671#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
cae5852d 1672#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
ffbab09b 1673#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
cae5852d 1674#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
ffbab09b
VS
1675#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
1676#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
cae5852d
ZN
1677#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1678#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1679#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
ffbab09b 1680#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
cae5852d 1681#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
ffbab09b
VS
1682#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
1683#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
cae5852d
ZN
1684#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1685#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
ffbab09b 1686#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
4b65177b 1687#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
ffbab09b
VS
1688#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
1689 (dev)->pdev->device == 0x0152 || \
1690 (dev)->pdev->device == 0x015a)
1691#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
1692 (dev)->pdev->device == 0x0106 || \
1693 (dev)->pdev->device == 0x010A)
70a3eb7a 1694#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
4cae9ae0 1695#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
cae5852d 1696#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
ed1c9e2c 1697#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
ffbab09b 1698 ((dev)->pdev->device & 0xFF00) == 0x0C00)
d567b07f 1699#define IS_ULT(dev) (IS_HASWELL(dev) && \
ffbab09b 1700 ((dev)->pdev->device & 0xFF00) == 0x0A00)
9435373e 1701#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
ffbab09b 1702 ((dev)->pdev->device & 0x00F0) == 0x0020)
b833d685 1703#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
cae5852d 1704
85436696
JB
1705/*
1706 * The genX designation typically refers to the render engine, so render
1707 * capability related checks should use IS_GEN, while display and other checks
1708 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
1709 * chips, etc.).
1710 */
cae5852d
ZN
1711#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1712#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1713#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1714#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1715#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
85436696 1716#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
cae5852d 1717
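/*
 * Sketch of the convention above (illustrative only; the helpers named here
 * are hypothetical): render-capability checks key off the generation, while
 * display and platform-specific checks use their own macros.
 *
 *	if (IS_GEN6(dev))
 *		apply_gen6_render_workaround(dev);
 *	if (HAS_PCH_SPLIT(dev))
 *		init_pch_display(dev);
 */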
73ae478c
BW
1718#define RENDER_RING (1<<RCS)
1719#define BSD_RING (1<<VCS)
1720#define BLT_RING (1<<BCS)
1721#define VEBOX_RING (1<<VECS)
1722#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
1723#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
1724#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
3d29b842 1725#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
651d794f 1726#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
cae5852d
ZN
1727#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1728
254f965c 1729#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
93553609 1730#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
1d2a314c 1731
05394f39 1732#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
cae5852d
ZN
1733#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1734
b45305fc
DV
1735/* Early gen2 have a totally busted CS tlb and require pinned batches. */
1736#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1737
cae5852d
ZN
1738/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1739 * rows, which changed the alignment requirements and fence programming.
1740 */
1741#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
1742 IS_I915GM(dev)))
1743#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1744#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1745#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
cae5852d
ZN
1746#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1747#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
cae5852d
ZN
1748
1749#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1750#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1751#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
cae5852d 1752
f5adf94e
DL
1753#define HAS_IPS(dev) (IS_ULT(dev))
1754
dd93be58 1755#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
86d52df6 1756#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
30568c45 1757#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
18b5992c 1758#define HAS_PSR(dev) (IS_HASWELL(dev))
affa9354 1759
17a303ec
PZ
1760#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1761#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1762#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
1763#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
1764#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
1765#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
1766
2c1792a1 1767#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
eb877ebf 1768#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
cae5852d
ZN
1769#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1770#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
40c7ead9 1771#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
45e6e3a1 1772#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
cae5852d 1773
040d2baa
BW
1774/* DPF == dynamic parity feature */
1775#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1776#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
e1ef7cc2 1777
c8735b0c
BW
1778#define GT_FREQUENCY_MULTIPLIER 50
1779
05394f39
CW
1780#include "i915_trace.h"
1781
83b7f9ac
ED
1782/**
 1783 * RC6 is a special power stage which allows the GPU to enter a very
 1784 * low-voltage mode when idle, using down to 0V while in this stage. This
 1785 * stage is entered automatically when the GPU is idle and RC6 support is
 1786 * enabled, and the GPU wakes up automatically as soon as a new workload arrives.
 1787 *
 1788 * There are different RC6 modes available on Intel GPUs, which differ in the
 1789 * latency required to enter and leave RC6 and in the voltage consumed by the
 1790 * GPU in each state.
 1791 *
 1792 * The combination of the following flags defines which states the GPU is
 1793 * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
 1794 * RC6pp is the deepest RC6. Hardware support for them varies by GPU, BIOS,
 1795 * chipset and platform. RC6 is usually the safest one and the one which
 1796 * brings the most power savings; deeper states save more power, but require
 1797 * higher latency to switch to and wake up.
1798 */
1799#define INTEL_RC6_ENABLE (1<<0)
1800#define INTEL_RC6p_ENABLE (1<<1)
1801#define INTEL_RC6pp_ENABLE (1<<2)
1802
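/*
 * Illustrative sketch: these flags form a bitmask describing which RC6 states
 * may be entered. A policy allowing normal and deep RC6 but not the deepest
 * state could be expressed and tested as:
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 *	if (rc6_mask & INTEL_RC6p_ENABLE)
 *		... also program the deep RC6 state ...
 */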
baa70943 1803extern const struct drm_ioctl_desc i915_ioctls[];
b3a83639 1804extern int i915_max_ioctl;
a35d9d3c
BW
1805extern unsigned int i915_fbpercrtc __always_unused;
1806extern int i915_panel_ignore_lid __read_mostly;
1807extern unsigned int i915_powersave __read_mostly;
f45b5557 1808extern int i915_semaphores __read_mostly;
a35d9d3c 1809extern unsigned int i915_lvds_downclock __read_mostly;
121d527a 1810extern int i915_lvds_channel_mode __read_mostly;
4415e63b 1811extern int i915_panel_use_ssc __read_mostly;
a35d9d3c 1812extern int i915_vbt_sdvo_panel_type __read_mostly;
c0f372b3 1813extern int i915_enable_rc6 __read_mostly;
4415e63b 1814extern int i915_enable_fbc __read_mostly;
a35d9d3c 1815extern bool i915_enable_hangcheck __read_mostly;
650dc07e 1816extern int i915_enable_ppgtt __read_mostly;
105b7c11 1817extern int i915_enable_psr __read_mostly;
0a3af268 1818extern unsigned int i915_preliminary_hw_support __read_mostly;
2124b72e 1819extern int i915_disable_power_well __read_mostly;
3c4ca58c 1820extern int i915_enable_ips __read_mostly;
2385bdf0 1821extern bool i915_fastboot __read_mostly;
c67a470b 1822extern int i915_enable_pc8 __read_mostly;
90058745 1823extern int i915_pc8_timeout __read_mostly;
0b74b508 1824extern bool i915_prefault_disable __read_mostly;
b3a83639 1825
6a9ee8af
DA
1826extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1827extern int i915_resume(struct drm_device *dev);
7c1c2871
DA
1828extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1829extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1830
1da177e4 1831 /* i915_dma.c */
d05c617e 1832void i915_update_dri1_breadcrumb(struct drm_device *dev);
84b1fd10 1833extern void i915_kernel_lost_context(struct drm_device * dev);
22eae947 1834extern int i915_driver_load(struct drm_device *, unsigned long flags);
ba8bbcf6 1835extern int i915_driver_unload(struct drm_device *);
673a394b 1836extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
84b1fd10 1837extern void i915_driver_lastclose(struct drm_device * dev);
6c340eac
EA
1838extern void i915_driver_preclose(struct drm_device *dev,
1839 struct drm_file *file_priv);
673a394b
EA
1840extern void i915_driver_postclose(struct drm_device *dev,
1841 struct drm_file *file_priv);
84b1fd10 1842extern int i915_driver_device_is_agp(struct drm_device * dev);
c43b5634 1843#ifdef CONFIG_COMPAT
0d6aa60b
DA
1844extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1845 unsigned long arg);
c43b5634 1846#endif
673a394b 1847extern int i915_emit_box(struct drm_device *dev,
c4e7a414
CW
1848 struct drm_clip_rect *box,
1849 int DR1, int DR4);
8e96d9c4 1850extern int intel_gpu_reset(struct drm_device *dev);
d4b8bb2a 1851extern int i915_reset(struct drm_device *dev);
7648fa99
JB
1852extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1853extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1854extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1855extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1856
073f34d9 1857extern void intel_console_resume(struct work_struct *work);
af6061af 1858
1da177e4 1859/* i915_irq.c */
10cd45b6 1860void i915_queue_hangcheck(struct drm_device *dev);
527f9e90 1861void i915_handle_error(struct drm_device *dev, bool wedged);
1da177e4 1862
f71d4af4 1863extern void intel_irq_init(struct drm_device *dev);
e1b4d303 1864extern void intel_pm_init(struct drm_device *dev);
20afbda2 1865extern void intel_hpd_init(struct drm_device *dev);
907b28c5
CW
1867
1868extern void intel_uncore_sanitize(struct drm_device *dev);
1869extern void intel_uncore_early_sanitize(struct drm_device *dev);
1870extern void intel_uncore_init(struct drm_device *dev);
907b28c5
CW
1871extern void intel_uncore_clear_errors(struct drm_device *dev);
1872extern void intel_uncore_check_errors(struct drm_device *dev);
aec347ab 1873extern void intel_uncore_fini(struct drm_device *dev);
b1f14ad0 1874
7c463586
KP
1875void
1876i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1877
1878void
1879i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1880
673a394b
EA
1881/* i915_gem.c */
1882int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1883 struct drm_file *file_priv);
1884int i915_gem_create_ioctl(struct drm_device *dev, void *data,
1885 struct drm_file *file_priv);
1886int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1887 struct drm_file *file_priv);
1888int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1889 struct drm_file *file_priv);
1890int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1891 struct drm_file *file_priv);
de151cf6
JB
1892int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1893 struct drm_file *file_priv);
673a394b
EA
1894int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1895 struct drm_file *file_priv);
1896int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1897 struct drm_file *file_priv);
1898int i915_gem_execbuffer(struct drm_device *dev, void *data,
1899 struct drm_file *file_priv);
76446cac
JB
1900int i915_gem_execbuffer2(struct drm_device *dev, void *data,
1901 struct drm_file *file_priv);
673a394b
EA
1902int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1903 struct drm_file *file_priv);
1904int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1905 struct drm_file *file_priv);
1906int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1907 struct drm_file *file_priv);
199adf40
BW
1908int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
1909 struct drm_file *file);
1910int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
1911 struct drm_file *file);
673a394b
EA
1912int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1913 struct drm_file *file_priv);
3ef94daa
CW
1914int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1915 struct drm_file *file_priv);
673a394b
EA
1916int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
1917 struct drm_file *file_priv);
1918int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
1919 struct drm_file *file_priv);
1920int i915_gem_set_tiling(struct drm_device *dev, void *data,
1921 struct drm_file *file_priv);
1922int i915_gem_get_tiling(struct drm_device *dev, void *data,
1923 struct drm_file *file_priv);
5a125c3c
EA
1924int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1925 struct drm_file *file_priv);
23ba4fd0
BW
1926int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1927 struct drm_file *file_priv);
673a394b 1928void i915_gem_load(struct drm_device *dev);
42dcedd4
CW
1929void *i915_gem_object_alloc(struct drm_device *dev);
1930void i915_gem_object_free(struct drm_i915_gem_object *obj);
37e680a1
CW
1931void i915_gem_object_init(struct drm_i915_gem_object *obj,
1932 const struct drm_i915_gem_object_ops *ops);
05394f39
CW
1933struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1934 size_t size);
673a394b 1935void i915_gem_free_object(struct drm_gem_object *obj);
2f633156 1936void i915_gem_vma_destroy(struct i915_vma *vma);
42dcedd4 1937
2021746e 1938int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
c37e2204 1939 struct i915_address_space *vm,
2021746e 1940 uint32_t alignment,
86a1ee26
CW
1941 bool map_and_fenceable,
1942 bool nonblocking);
05394f39 1943void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
07fe0b12
BW
1944int __must_check i915_vma_unbind(struct i915_vma *vma);
1945int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
dd624afd 1946int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
05394f39 1947void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
673a394b 1948void i915_gem_lastclose(struct drm_device *dev);
f787a5f5 1949
37e680a1 1950int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
9da3da66
CW
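/* Return the backing page at index n of the object, or NULL if n is out of
 * range; obj->pages must already be populated (e.g. by a prior call to
 * i915_gem_object_get_pages()).
 */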
1951static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
1952{
67d5a50c
ID
1953 struct sg_page_iter sg_iter;
1954
1955 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
2db76d7c 1956 return sg_page_iter_page(&sg_iter);
67d5a50c
ID
1957
1958 return NULL;
9da3da66 1959}
a5570178
CW
1960static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
1961{
1962 BUG_ON(obj->pages == NULL);
1963 obj->pages_pin_count++;
1964}
1965static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1966{
1967 BUG_ON(obj->pages_pin_count == 0);
1968 obj->pages_pin_count--;
1969}
1970
54cf91dc 1971int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
2911a35b
BW
1972int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1973 struct intel_ring_buffer *to);
e2d05a8b
BW
1974void i915_vma_move_to_active(struct i915_vma *vma,
1975 struct intel_ring_buffer *ring);
ff72145b
DA
1976int i915_gem_dumb_create(struct drm_file *file_priv,
1977 struct drm_device *dev,
1978 struct drm_mode_create_dumb *args);
1979int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
1980 uint32_t handle, uint64_t *offset);
f787a5f5
CW
1981/**
 1982 * Returns true if seq1 is later than seq2, taking 32-bit seqno wraparound into account.
1983 */
1984static inline bool
1985i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1986{
1987 return (int32_t)(seq1 - seq2) >= 0;
1988}
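/*
 * Example: seqnos wrap at 32 bits, and the signed subtraction above handles
 * that, e.g. i915_seqno_passed(2, 0xfffffffe) is true because
 * (int32_t)(2 - 0xfffffffe) == 4 >= 0, even though 2 < 0xfffffffe when
 * compared as unsigned values.
 */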
1989
fca26bb4
MK
1990int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
1991int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
06d98131 1992int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
d9e86c0e 1993int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
2021746e 1994
9a5a53b3 1995static inline bool
1690e1eb
CW
1996i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
1997{
1998 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1999 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2000 dev_priv->fence_regs[obj->fence_reg].pin_count++;
9a5a53b3
CW
2001 return true;
2002 } else
2003 return false;
1690e1eb
CW
2004}
2005
2006static inline void
2007i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
2008{
2009 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2010 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
b8c3af76 2011 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
1690e1eb
CW
2012 dev_priv->fence_regs[obj->fence_reg].pin_count--;
2013 }
2014}
2015
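/*
 * Illustrative sketch: the fence pin helpers above are meant to bracket GTT
 * access that relies on the fence register staying programmed.
 *
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... access the object through its fenced GTT mapping ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 */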
b29c19b6 2016bool i915_gem_retire_requests(struct drm_device *dev);
a71d8d94 2017void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
33196ded 2018int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
d6b2c790 2019 bool interruptible);
1f83fee0
DV
2020static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
2021{
2022 return unlikely(atomic_read(&error->reset_counter)
2023 & I915_RESET_IN_PROGRESS_FLAG);
2024}
2025
2026static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
2027{
2028 return atomic_read(&error->reset_counter) == I915_WEDGED;
2029}
a71d8d94 2030
069efc1d 2031void i915_gem_reset(struct drm_device *dev);
000433b6 2032bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
a8198eea 2033int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1070a42b 2034int __must_check i915_gem_init(struct drm_device *dev);
f691e2f4 2035int __must_check i915_gem_init_hw(struct drm_device *dev);
c3787e2e 2036int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
f691e2f4 2037void i915_gem_init_swizzling(struct drm_device *dev);
79e53945 2038void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
b2da9fe5 2039int __must_check i915_gpu_idle(struct drm_device *dev);
2021746e 2040int __must_check i915_gem_idle(struct drm_device *dev);
0025c077
MK
2041int __i915_add_request(struct intel_ring_buffer *ring,
2042 struct drm_file *file,
7d736f4f 2043 struct drm_i915_gem_object *batch_obj,
0025c077
MK
2044 u32 *seqno);
2045#define i915_add_request(ring, seqno) \
854c94a7 2046 __i915_add_request(ring, NULL, NULL, seqno)
199b2bc2
BW
2047int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
2048 uint32_t seqno);
de151cf6 2049int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
2021746e
CW
2050int __must_check
2051i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
2052 bool write);
2053int __must_check
dabdfe02
CW
2054i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
2055int __must_check
2da3b9b9
CW
2056i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2057 u32 alignment,
2021746e 2058 struct intel_ring_buffer *pipelined);
cc98b413 2059void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
71acb5eb 2060int i915_gem_attach_phys_object(struct drm_device *dev,
05394f39 2061 struct drm_i915_gem_object *obj,
6eeefaf3
CW
2062 int id,
2063 int align);
71acb5eb 2064void i915_gem_detach_phys_object(struct drm_device *dev,
05394f39 2065 struct drm_i915_gem_object *obj);
71acb5eb 2066void i915_gem_free_all_phys_object(struct drm_device *dev);
b29c19b6 2067int i915_gem_open(struct drm_device *dev, struct drm_file *file);
05394f39 2068void i915_gem_release(struct drm_device *dev, struct drm_file *file);
673a394b 2069
0fa87796
ID
2070uint32_t
2071i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
467cffba 2072uint32_t
d865110c
ID
2073i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2074 int tiling_mode, bool fenced);
467cffba 2075
e4ffd173
CW
2076int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2077 enum i915_cache_level cache_level);
2078
1286ff73
DV
2079struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
2080 struct dma_buf *dma_buf);
2081
2082struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
2083 struct drm_gem_object *gem_obj, int flags);
2084
19b2dbde
CW
2085void i915_gem_restore_fences(struct drm_device *dev);
2086
a70a3148
BW
2087unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
2088 struct i915_address_space *vm);
2089bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
2090bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
2091 struct i915_address_space *vm);
2092unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
2093 struct i915_address_space *vm);
2094struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
2095 struct i915_address_space *vm);
accfef2e
BW
2096struct i915_vma *
2097i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2098 struct i915_address_space *vm);
5c2abbea
BW
2099
2100struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
2101
a70a3148
BW
2102/* Some GGTT VM helpers */
2103#define obj_to_ggtt(obj) \
2104 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
2105static inline bool i915_is_ggtt(struct i915_address_space *vm)
2106{
2107 struct i915_address_space *ggtt =
2108 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
2109 return vm == ggtt;
2110}
2111
2112static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
2113{
2114 return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
2115}
2116
2117static inline unsigned long
2118i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
2119{
2120 return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
2121}
2122
2123static inline unsigned long
2124i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
2125{
2126 return i915_gem_obj_size(obj, obj_to_ggtt(obj));
2127}
c37e2204
BW
2128
2129static inline int __must_check
2130i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2131 uint32_t alignment,
2132 bool map_and_fenceable,
2133 bool nonblocking)
2134{
2135 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
2136 map_and_fenceable, nonblocking);
2137}
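/*
 * Illustrative usage sketch for the GGTT helpers above (error handling and
 * locking elided; variable names are only for illustration):
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 *	if (ret == 0) {
 *		offset = i915_gem_obj_ggtt_offset(obj);
 *		...
 *		i915_gem_object_unpin(obj);
 *	}
 */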
a70a3148 2138
254f965c
BW
2139/* i915_gem_context.c */
2140void i915_gem_context_init(struct drm_device *dev);
2141void i915_gem_context_fini(struct drm_device *dev);
254f965c 2142void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
e0556841
BW
2143int i915_switch_context(struct intel_ring_buffer *ring,
2144 struct drm_file *file, int to_id);
dce3271b
MK
2145void i915_gem_context_free(struct kref *ctx_ref);
2146static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
2147{
2148 kref_get(&ctx->ref);
2149}
2150
2151static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
2152{
2153 kref_put(&ctx->ref, i915_gem_context_free);
2154}
2155
c0bb617a 2156struct i915_ctx_hang_stats * __must_check
11fa3384 2157i915_gem_context_get_hang_stats(struct drm_device *dev,
c0bb617a
MK
2158 struct drm_file *file,
2159 u32 id);
84624813
BW
2160int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2161 struct drm_file *file);
2162int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2163 struct drm_file *file);
1286ff73 2164
76aaf220 2165/* i915_gem_gtt.c */
1d2a314c 2166void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
7bddb01f
DV
2167void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
2168 struct drm_i915_gem_object *obj,
2169 enum i915_cache_level cache_level);
2170void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
2171 struct drm_i915_gem_object *obj);
1d2a314c 2172
76aaf220 2173void i915_gem_restore_gtt_mappings(struct drm_device *dev);
74163907
DV
2174int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
2175void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
e4ffd173 2176 enum i915_cache_level cache_level);
05394f39 2177void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
74163907 2178void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
d7e5008f
BW
2179void i915_gem_init_global_gtt(struct drm_device *dev);
2180void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
2181 unsigned long mappable_end, unsigned long end);
e76e9aeb 2182int i915_gem_gtt_init(struct drm_device *dev);
d09105c6 2183static inline void i915_gem_chipset_flush(struct drm_device *dev)
e76e9aeb
BW
2184{
2185 if (INTEL_INFO(dev)->gen < 6)
2186 intel_gtt_chipset_flush();
2187}
2188
76aaf220 2189
b47eb4a2 2190/* i915_gem_evict.c */
f6cd1f15
BW
2191int __must_check i915_gem_evict_something(struct drm_device *dev,
2192 struct i915_address_space *vm,
2193 int min_size,
42d6ab48
CW
2194 unsigned alignment,
2195 unsigned cache_level,
86a1ee26
CW
2196 bool mappable,
2197 bool nonblock);
68c8c17f 2198int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
6c085a72 2199int i915_gem_evict_everything(struct drm_device *dev);
b47eb4a2 2200
9797fbfb
CW
2201/* i915_gem_stolen.c */
2202int i915_gem_init_stolen(struct drm_device *dev);
11be49eb
CW
2203int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
2204void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
9797fbfb 2205void i915_gem_cleanup_stolen(struct drm_device *dev);
0104fdbb
CW
2206struct drm_i915_gem_object *
2207i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
866d12b4
CW
2208struct drm_i915_gem_object *
2209i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
2210 u32 stolen_offset,
2211 u32 gtt_offset,
2212 u32 size);
0104fdbb 2213void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
9797fbfb 2214
673a394b 2215/* i915_gem_tiling.c */
2c1792a1 2216static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
e9b73c67
CW
2217{
2218 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2219
2220 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
2221 obj->tiling_mode != I915_TILING_NONE;
2222}
2223
673a394b 2224void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
05394f39
CW
2225void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
2226void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
673a394b
EA
2227
2228/* i915_gem_debug.c */
23bc5982
CW
2229#if WATCH_LISTS
2230int i915_verify_lists(struct drm_device *dev);
673a394b 2231#else
23bc5982 2232#define i915_verify_lists(dev) 0
673a394b 2233#endif
1da177e4 2234
2017263e 2235/* i915_debugfs.c */
27c202ad
BG
2236int i915_debugfs_init(struct drm_minor *minor);
2237void i915_debugfs_cleanup(struct drm_minor *minor);
07144428
DL
2238#if defined(CONFIG_DEBUG_FS)
2239void intel_display_crc_init(struct drm_device *dev);
2240#else
 2241static inline void intel_display_crc_init(struct drm_device *dev) {}
2242#endif
84734a04
MK
2243
2244/* i915_gpu_error.c */
edc3d884
MK
2245__printf(2, 3)
2246void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
fc16b48b
MK
2247int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
2248 const struct i915_error_state_file_priv *error);
4dc955f7
MK
2249int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
2250 size_t count, loff_t pos);
2251static inline void i915_error_state_buf_release(
2252 struct drm_i915_error_state_buf *eb)
2253{
2254 kfree(eb->buf);
2255}
84734a04
MK
2256void i915_capture_error_state(struct drm_device *dev);
2257void i915_error_state_get(struct drm_device *dev,
2258 struct i915_error_state_file_priv *error_priv);
2259void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2260void i915_destroy_error_state(struct drm_device *dev);
2261
2262void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2263const char *i915_cache_level_str(int type);
2017263e 2264
317c35d1
JB
2265/* i915_suspend.c */
2266extern int i915_save_state(struct drm_device *dev);
2267extern int i915_restore_state(struct drm_device *dev);
0a3e67a4 2268
d8157a36
DV
2269/* i915_ums.c */
2270void i915_save_display_reg(struct drm_device *dev);
2271void i915_restore_display_reg(struct drm_device *dev);
317c35d1 2272
0136db58
BW
2273/* i915_sysfs.c */
2274void i915_setup_sysfs(struct drm_device *dev_priv);
2275void i915_teardown_sysfs(struct drm_device *dev_priv);
2276
f899fc64
CW
2277/* intel_i2c.c */
2278extern int intel_setup_gmbus(struct drm_device *dev);
2279extern void intel_teardown_gmbus(struct drm_device *dev);
8f375e10 2280static inline bool intel_gmbus_is_port_valid(unsigned port)
3bd7d909 2281{
2ed06c93 2282 return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
3bd7d909
DK
2283}
2284
2285extern struct i2c_adapter *intel_gmbus_get_adapter(
2286 struct drm_i915_private *dev_priv, unsigned port);
e957d772
CW
2287extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
2288extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
8f375e10 2289static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
b8232e90
CW
2290{
2291 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
2292}
f899fc64
CW
2293extern void intel_i2c_reset(struct drm_device *dev);
2294
3b617967 2295/* intel_opregion.c */
9c4b0a68 2296struct intel_encoder;
44834a67
CW
2297extern int intel_opregion_setup(struct drm_device *dev);
2298#ifdef CONFIG_ACPI
2299extern void intel_opregion_init(struct drm_device *dev);
2300extern void intel_opregion_fini(struct drm_device *dev);
3b617967 2301extern void intel_opregion_asle_intr(struct drm_device *dev);
9c4b0a68
JN
2302extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
2303 bool enable);
ecbc5cf3
JN
2304extern int intel_opregion_notify_adapter(struct drm_device *dev,
2305 pci_power_t state);
65e082c9 2306#else
44834a67
CW
2307static inline void intel_opregion_init(struct drm_device *dev) { return; }
2308static inline void intel_opregion_fini(struct drm_device *dev) { return; }
3b617967 2309static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
9c4b0a68
JN
2310static inline int
2311intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
2312{
2313 return 0;
2314}
ecbc5cf3
JN
2315static inline int
2316intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
2317{
2318 return 0;
2319}
65e082c9 2320#endif
8ee1c3db 2321
723bfd70
JB
2322/* intel_acpi.c */
2323#ifdef CONFIG_ACPI
2324extern void intel_register_dsm_handler(void);
2325extern void intel_unregister_dsm_handler(void);
2326#else
2327static inline void intel_register_dsm_handler(void) { return; }
2328static inline void intel_unregister_dsm_handler(void) { return; }
2329#endif /* CONFIG_ACPI */
2330
79e53945 2331/* modesetting */
f817586c 2332extern void intel_modeset_init_hw(struct drm_device *dev);
7d708ee4 2333extern void intel_modeset_suspend_hw(struct drm_device *dev);
79e53945 2334extern void intel_modeset_init(struct drm_device *dev);
2c7111db 2335extern void intel_modeset_gem_init(struct drm_device *dev);
79e53945 2336extern void intel_modeset_cleanup(struct drm_device *dev);
28d52043 2337extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
45e2b5f6
DV
2338extern void intel_modeset_setup_hw_state(struct drm_device *dev,
2339 bool force_restore);
44cec740 2340extern void i915_redisable_vga(struct drm_device *dev);
ee5382ae 2341extern bool intel_fbc_enabled(struct drm_device *dev);
43a9539f 2342extern void intel_disable_fbc(struct drm_device *dev);
7648fa99 2343extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
dde86e2d 2344extern void intel_init_pch_refclk(struct drm_device *dev);
3b8d8d91 2345extern void gen6_set_rps(struct drm_device *dev, u8 val);
0a073b84
JB
2346extern void valleyview_set_rps(struct drm_device *dev, u8 val);
2347extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
2348extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
0206e353
AJ
2349extern void intel_detect_pch(struct drm_device *dev);
2350extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
0136db58 2351extern int intel_enable_rc6(const struct drm_device *dev);
3bad0781 2352
2911a35b 2353extern bool i915_semaphore_is_enabled(struct drm_device *dev);
c0c7babc
BW
2354int i915_reg_read_ioctl(struct drm_device *dev, void *data,
2355 struct drm_file *file);
575155a9 2356
6ef3d427
CW
2357/* overlay */
2358extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
edc3d884
MK
2359extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
2360 struct intel_overlay_error_state *error);
c4a1d9e4
CW
2361
2362extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
edc3d884 2363extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
c4a1d9e4
CW
2364 struct drm_device *dev,
2365 struct intel_display_error_state *error);
6ef3d427 2366
b7287d80
BW
 2367/* On the SNB platform, the forcewake bit must be set before reading ring
 2368 * registers, to prevent the GT core from powering down and returning stale
 2369 * values.
2370 */
fcca7926
BW
2371void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
2372void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
b7287d80 2373
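/*
 * Illustrative usage sketch (reg and val are placeholders): hold forcewake
 * around the register access so the value read is current.
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(reg);
 *	gen6_gt_force_wake_put(dev_priv);
 */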
42c0526c
BW
2374int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
2375int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
59de0813
JN
2376
2377/* intel_sideband.c */
64936258
JN
2378u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
2379void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
2380u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
e9f882a3
JN
2381u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
2382void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2383u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
2384void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2385u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
2386void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
2387u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
2388void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
5e69f97f
CML
2389u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
2390void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
59de0813
JN
2391u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
2392 enum intel_sbi_destination destination);
2393void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
2394 enum intel_sbi_destination destination);
0a073b84 2395
855ba3be
JB
2396int vlv_gpu_freq(int ddr_freq, int val);
2397int vlv_freq_opcode(int ddr_freq, int val);
42c0526c 2398
0b274481
BW
2399#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
2400#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
2401
2402#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
2403#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
2404#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
2405#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
2406
2407#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
2408#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
2409#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
2410#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
2411
2412#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
2413#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
cae5852d
ZN
2414
2415#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
2416#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
2417
55bc60db
VS
2418/* "Broadcast RGB" property */
2419#define INTEL_BROADCAST_RGB_AUTO 0
2420#define INTEL_BROADCAST_RGB_FULL 1
2421#define INTEL_BROADCAST_RGB_LIMITED 2
ba4f01a3 2422
766aa1c4
VS
2423static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
2424{
2425 if (HAS_PCH_SPLIT(dev))
2426 return CPU_VGACNTRL;
2427 else if (IS_VALLEYVIEW(dev))
2428 return VLV_VGACNTRL;
2429 else
2430 return VGACNTRL;
2431}
2432
2bb4629a
VS
2433static inline void __user *to_user_ptr(u64 address)
2434{
2435 return (void __user *)(uintptr_t)address;
2436}
2437
df97729f
ID
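/*
 * The wait may begin partway through the current tick, so one extra jiffy
 * (capped at MAX_JIFFY_OFFSET) is added to the converted value to guarantee
 * the caller sleeps for at least the requested duration; the same reasoning
 * applies to timespec_to_jiffies_timeout() below.
 */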
2438static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
2439{
2440 unsigned long j = msecs_to_jiffies(m);
2441
2442 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2443}
2444
2445static inline unsigned long
2446timespec_to_jiffies_timeout(const struct timespec *value)
2447{
2448 unsigned long j = timespec_to_jiffies(value);
2449
2450 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2451}
2452
1da177e4 2453#endif