/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)

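/*
 * Illustrative note (added here, not part of the original header): because the
 * power domain values above mirror the pipe/transcoder ordering, these helpers
 * are plain offsets, e.g. POWER_DOMAIN_PIPE(PIPE_B) evaluates to
 * POWER_DOMAIN_PIPE_B and POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) to
 * POWER_DOMAIN_TRANSCODER_EDP.
 */
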
enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

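/*
 * Illustrative usage (an added sketch, not from the original header): both
 * iterators assume the usual "dev" / "__crtc" pointers are in scope, e.g.
 *
 *	int pipe;
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 */
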
struct drm_i915_private;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t	tu;
	uint32_t	gmch_m;
	uint32_t	gmch_n;
	uint32_t	link_m;
	uint32_t	link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_COHERENCY	0
#define WATCH_LISTS	0
#define WATCH_GTT	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:2;
	} *active_bo, *pinned_bo;
	u32 active_bo_count, pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};

struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
				 uint32_t sprite_width, int pixel_size,
				 bool enable);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};

struct drm_i915_gt_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(has_force_wake) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_bsd_ring) sep \
	func(has_blt_ring) sep \
	func(has_vebox_ring) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

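/*
 * Illustrative expansion (an added note, not part of the original header):
 * with the two helpers above, DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON)
 * inside struct intel_device_info expands to a run of one-bit fields,
 *
 *	u8 is_mobile:1; u8 is_i85x:1; ...; u8 has_fpga_dbg:1;
 *
 * so each feature flag becomes a bitfield without repeating the list by hand.
 */
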
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC,
	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};

typedef uint32_t gen6_gtt_pte_t;

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	unsigned long start;		/* Start offset of used GTT */
	size_t total;			/* Total size GTT can map */
	size_t stolen_size;		/* Total size of stolen memory */

	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;
	struct {
		dma_addr_t addr;
		struct page *page;
	} scratch;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
	void (*gtt_remove)(struct drm_device *dev);
	void (*gtt_clear_range)(struct drm_device *dev,
				unsigned int first_entry,
				unsigned int num_entries);
	void (*gtt_insert_entries)(struct drm_device *dev,
				   struct sg_table *st,
				   unsigned int pg_start,
				   enum i915_cache_level cache_level);
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level);
};
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)

struct i915_hw_ppgtt {
	struct drm_device *dev;
	unsigned num_pd_entries;
	struct page **pt_pages;
	uint32_t pd_offset;
	dma_addr_t *pt_dma_addr;

	/* pte functions, mirroring the interface of the global gtt. */
	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
			    unsigned int first_entry,
			    unsigned int num_entries);
	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
			       struct sg_table *st,
			       unsigned int pg_start,
			       enum i915_cache_level cache_level);
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level);
	int (*enable)(struct drm_device *dev);
	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;
};

enum no_fbc_reason {
	FBC_NO_OUTPUT, /* no outputs enabled to compress */
	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
	FBC_MODE_TOO_LARGE, /* mode too large for compression */
	FBC_BAD_PLANE, /* fbc not supported on plane */
	FBC_NOT_TILED, /* buffer not tiled */
	FBC_MULTIPLE_PIPES, /* more than one pipe active */
	FBC_MODULE_PARAM,
	FBC_CHIP_DEFAULT, /* disabled by default on this chip */
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	struct work_struct work;
	struct delayed_work vlv_work;
	u32 pm_iir;
	/* lock - irqsave spinlock that protects the work_struct and
	 * pm_iir. */
	spinlock_t lock;

	/* The below variables and all the rps hw state are protected by
	 * dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 hw_max;

	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

/* Power well structure for haswell */
struct i915_power_well {
	struct drm_device *device;
	spinlock_t lock;
	/* power well enable/disable usage count */
	int count;
	int i915_request;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct intel_l3_parity {
	u32 *remap_info;
	struct work_struct error_work;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Memory allocator for GTT */
	struct drm_mm gtt_space;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	int gtt_mtrr;

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int suspended;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long last_reset;

	/**
	 * State variable and reset counter controlling the reset flow
	 *
	 * Upper bits are for the reset counter. This counter is used by the
	 * wait_seqno code to notice, race-free, that a reset event happened
	 * and that it needs to restart the entire ioctl (since most likely
	 * the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 *
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	 * that happens.
	 */
	atomic_t reset_counter;

	/**
	 * Special values/flags for reset_counter
	 *
	 * Note that the code relies on
	 *	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
	 * being true.
	 */
#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			0xffffffff

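	/*
	 * Illustrative reading of the scheme above (an added note, not from
	 * the original source): a reset is in flight while
	 * atomic_read(&reset_counter) & I915_RESET_IN_PROGRESS_FLAG is set,
	 * and the hardware is considered terminally wedged once the counter
	 * equals I915_WEDGED.
	 */
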
	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	int crt_ddc_pin;

	int child_dev_num;
	struct child_device_config *child_dev;
};

typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	void __iomem *regs;

	struct drm_i915_gt_funcs gt;
	/** gt_fifo_count and the subsequent register write are synchronized
	 * with dev->struct_mutex. */
	unsigned gt_fifo_count;
	/** forcewake_count is protected by gt_lock */
	unsigned forcewake_count;
	/** gt_lock is also taken in irq contexts. */
	spinlock_t gt_lock;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];


	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask;
	u32 gt_irq_mask;

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct timer_list hotplug_reenable_timer;

	int num_plane;

	unsigned long cfb_size;
	unsigned int cfb_fb;
	enum plane cfb_plane;
	int cfb_y;
	struct intel_fbc_work *fbc_work;

	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;
	unsigned int sprite_scaling_enabled;

	/* backlight */
	struct {
		int level;
		bool enabled;
		spinlock_t lock; /* bl registers and the above bl fields */
		struct backlight_device *device;
	} backlight;

	/* LVDS info */
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct i915_gtt gtt;

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS*/
	int lvds_downclock;
	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	/* Haswell power well */
	struct i915_power_well power_well;

	enum no_fbc_reason no_fbc_reason;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* list of fbdev register on this device */
	struct intel_fbdev *fbdev;

	/*
	 * The console may be contended at resume, but we don't
	 * want it to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;

	u32 fdi_rx_config;

	struct i915_suspend_saved_registers regfile;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
} drm_i915_private_t;

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))

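/*
 * Illustrative usage (an added sketch, not from the original header):
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG("ring %d initialised\n", i);
 */
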
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
};

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;
	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	/** This object's place on the active/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separate for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;

	/*
	 * Is the GPU currently using a fence to access this buffer,
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:2;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

	struct sg_table *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * Current offset of the object in GTT space.
	 *
	 * This is the same as gtt_space->start
	 */
	uint32_t gtt_offset;

	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Context related to this request */
	struct i915_hw_context *ctx;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

struct drm_i915_file_private {
	struct {
		spinlock_t lock;
		struct list_head request_list;
	} mm;
	struct idr context_idr;

	struct i915_ctx_hang_stats hang_stats;
};

1399#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
1400
1401#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1402#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1403#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1404#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1405#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1406#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1407#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
1408#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1409#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1410#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1411#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1412#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1413#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1414#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1415#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1416#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1417#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1418#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
4b65177b 1419#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
8ab43976
JB
1420#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
1421 (dev)->pci_device == 0x0152 || \
1422 (dev)->pci_device == 0x015a)
6547fbdb
DV
1423#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
1424 (dev)->pci_device == 0x0106 || \
1425 (dev)->pci_device == 0x010A)
70a3eb7a 1426#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
4cae9ae0 1427#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
cae5852d 1428#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
d567b07f
PZ
1429#define IS_ULT(dev) (IS_HASWELL(dev) && \
1430 ((dev)->pci_device & 0xFF00) == 0x0A00)
cae5852d 1431
/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)

#define HAS_BSD(dev)		(INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->has_vebox_ring)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						       IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev)		(IS_ULT(dev))

#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)

#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00

#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)

#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))

#define GT_FREQUENCY_MULTIPLIER 50

#include "i915_trace.h"

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPU, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags define which states GPU is allowed
 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is deepest RC6. Their support by hardware varies according to the
 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
 * which brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)

extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;

extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

				/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);

/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);

void i915_error_state_free(struct kref *error_ref);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
#else
#define i915_destroy_error_state(x)
#endif


/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);

int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     uint32_t alignment,
				     bool map_and_fenceable,
				     bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
05394f39 1668void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
673a394b 1669void i915_gem_lastclose(struct drm_device *dev);
f787a5f5 1670
37e680a1 1671int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
9da3da66
CW
1672static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
1673{
67d5a50c
ID
1674 struct sg_page_iter sg_iter;
1675
1676 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
2db76d7c 1677 return sg_page_iter_page(&sg_iter);
67d5a50c
ID
1678
1679 return NULL;
9da3da66 1680}
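/*
 * Illustrative sketch (not a documented contract of this helper): because
 * i915_gem_object_get_page() restarts the scatterlist walk on every call,
 * code that touches many pages usually iterates the sg table once instead,
 * roughly like
 *
 *	struct sg_page_iter sg_iter;
 *
 *	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
 *		do_something(sg_page_iter_page(&sg_iter));
 *
 * where do_something() is a placeholder for the caller's per-page work.
 */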
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        BUG_ON(obj->pages == NULL);
        obj->pages_pin_count++;
}

static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        BUG_ON(obj->pages_pin_count == 0);
        obj->pages_pin_count--;
}
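/*
 * Expected pairing, sketched from the BUG_ON checks above (an assumption,
 * not a formal API description): the pin count may only be raised once the
 * backing pages exist, and every pin must be balanced by an unpin so the
 * pages can eventually be reaped under memory pressure:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... obj->pages is stable here ...
 *	i915_gem_object_unpin_pages(obj);
 */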
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                                    struct intel_ring_buffer *ring);

int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
                          uint32_t handle);
/**
 * Returns true if seq1 is later than seq2, taking 32-bit wraparound into
 * account.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}
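/*
 * Worked example (illustration only): seqnos compare modulo 2^32, so with
 * seq1 == 2 and seq2 == 0xfffffffe the difference wraps to 4 and the signed
 * test above still reports that seq1 has passed seq2, even though seq1 is
 * numerically smaller.
 */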
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
                dev_priv->fence_regs[obj->fence_reg].pin_count++;
                return true;
        } else
                return false;
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
                WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
                dev_priv->fence_regs[obj->fence_reg].pin_count--;
        }
}
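/*
 * Illustrative pairing (an assumption drawn from the helpers above): a
 * caller that needs the fence register to stay assigned across an operation
 * brackets it roughly like
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret)
 *		return ret;
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... use the fenced GTT mapping ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 *
 * An object without a fence register simply reports false from the pin and
 * needs no unpin.
 */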
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
        return unlikely(atomic_read(&error->reset_counter)
                        & I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
        return atomic_read(&error->reset_counter) == I915_WEDGED;
}

void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
                                            uint32_t read_domains,
                                            uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
                       struct drm_file *file,
                       struct drm_i915_gem_object *batch_obj,
                       u32 *seqno);
#define i915_add_request(ring, seqno) \
        __i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
                                 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
                                  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                int id,
                                int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
                           int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags);

/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
                        struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
        kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
        kref_put(&ctx->ref, i915_gem_context_free);
}

struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
                                struct drm_file *file,
                                u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file);

/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
                               unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                intel_gtt_chipset_flush();
}
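/*
 * Usage sketch (illustrative, not the only valid sequence): CPU writes that
 * must become visible to the GPU on pre-gen6 parts are typically flushed
 * out of the CPU caches and then out of the chipset write buffers:
 *
 *	i915_gem_clflush_object(obj);
 *	i915_gem_chipset_flush(dev);
 *
 * On gen6+ the chipset flush above is a no-op.
 */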
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
                                          unsigned alignment,
                                          unsigned cache_level,
                                          bool mappable,
                                          bool nonblock);
int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
                          const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
                                     int handle);

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
        return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
                struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
        return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);

/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
#endif

/*
 * On the SNB platform, the forcewake bit must be set before reading ring
 * registers, to prevent the GT core from powering down and returning stale
 * values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
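/*
 * Illustrative bracket (a sketch, not a complete recipe): a raw GT register
 * sample that must not race with the GT powering down looks roughly like
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	head = I915_READ(RING_HEAD(ring->mmio_base));
 *	gen6_gt_force_wake_put(dev_priv);
 *
 * with RING_HEAD() standing in for whichever GT register is being read.
 */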
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
                   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
                     enum intel_sbi_destination destination);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);

#define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read

#define __i915_write(x, y) \
        void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);

__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write

#define I915_READ8(reg)		i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val))

#define I915_READ16(reg)	i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg)	readw(dev_priv->regs + (reg))
#define I915_WRITE16_NOTRACE(reg, val)	writew(val, dev_priv->regs + (reg))

#define I915_READ(reg)		i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg)		readl(dev_priv->regs + (reg))
#define I915_WRITE_NOTRACE(reg, val)	writel(val, dev_priv->regs + (reg))

#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg)	i915_read64(dev_priv, (reg))

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
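/*
 * Usage sketch (assumes a local dev_priv pointer in scope, which is what
 * these macros implicitly rely on): a typical write-then-flush sequence is
 *
 *	I915_WRITE(DEIMR, dev_priv->irq_mask);
 *	POSTING_READ(DEIMR);
 *
 * where the posting read forces the write to reach the hardware before
 * execution continues; DEIMR is only an example register here.
 */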
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
        if (HAS_PCH_SPLIT(dev))
                return CPU_VGACNTRL;
        else if (IS_VALLEYVIEW(dev))
                return VLV_VGACNTRL;
        else
                return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
        return (void __user *)(uintptr_t)address;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
        unsigned long j = msecs_to_jiffies(m);

        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
        unsigned long j = timespec_to_jiffies(value);

        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
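/*
 * Worked example (illustrative): with HZ == 100, msecs_to_jiffies(5) rounds
 * up to a single jiffy, but the current jiffy may be almost over, so the
 * helpers above add one more to guarantee that at least the requested time
 * elapses; the min_t() clamps very large values to MAX_JIFFY_OFFSET.
 */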
#endif