/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

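/*
 * Illustrative sketch, not taken from the original header: how the iteration
 * helpers above are typically combined with pipe_name(). A dev_priv, dev and
 * crtc in scope are assumed; the DRM_DEBUG_KMS calls are just example
 * consumers.
 *
 *	int pipe;
 *	struct intel_encoder *encoder;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("checking pipe %c\n", pipe_name(pipe));
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		DRM_DEBUG_KMS("encoder %d drives this crtc\n",
 *			      encoder->base.base.id);
 */
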
struct intel_pch_pll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	int pll_reg;
	int fp0_reg;
	int fp1_reg;
};
#define I915_NUM_PLLS 2

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t	tu;
	uint32_t	gmch_m;
	uint32_t	gmch_n;
	uint32_t	link_m;
	uint32_t	link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

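/*
 * Illustrative sketch, not taken from the original header: filling in an
 * intel_link_m_n block for a DP link. The bpp/lane/clock values below are
 * made-up example numbers; the real callers derive them from the mode and
 * the negotiated DP link configuration.
 *
 *	struct intel_link_m_n m_n;
 *
 *	intel_link_compute_m_n(24,		// bits per pixel
 *			       4,		// lanes
 *			       148500,		// pixel clock in kHz
 *			       270000,		// link clock in kHz
 *			       &m_n);
 *	// The resulting gmch_m/gmch_n and link_m/link_n ratios are what the
 *	// mode set code programs into the pipe DATA_M/N and LINK_M/N registers.
 */
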
struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_COHERENCY	0
#define WATCH_LISTS	0
#define WATCH_GTT	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct drm_i915_private;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16
/* 16 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 5

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:2;
	} *active_bo, *pinned_bo;
	u32 active_bo_count, pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
				 uint32_t sprite_width, int pixel_size);
	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
				   struct drm_display_mode *mode);
	void (*modeset_global_resources)(struct drm_device *dev);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};

struct drm_i915_gt_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
};

#define DEV_INFO_FLAGS \
	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_llc)

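/*
 * Illustrative sketch, not taken from the original header: DEV_INFO_FLAGS is
 * an X-macro style list, so a consumer supplies its own DEV_INFO_FLAG() and
 * DEV_INFO_SEP definitions before expanding it. Dumping every set flag from a
 * struct intel_device_info *info could look roughly like this:
 *
 *	#define DEV_INFO_FLAG(x) if (info->x) seq_printf(m, #x " ");
 *	#define DEV_INFO_SEP
 *	DEV_INFO_FLAGS;
 *	#undef DEV_INFO_FLAG
 *	#undef DEV_INFO_SEP
 */
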
struct intel_device_info {
	u32 display_mmio_offset;
	u8 gen;
	u8 is_mobile:1;
	u8 is_i85x:1;
	u8 is_i915g:1;
	u8 is_i945gm:1;
	u8 is_g33:1;
	u8 need_gfx_hws:1;
	u8 is_g4x:1;
	u8 is_pineview:1;
	u8 is_broadwater:1;
	u8 is_crestline:1;
	u8 is_ivybridge:1;
	u8 is_valleyview:1;
	u8 has_force_wake:1;
	u8 is_haswell:1;
	u8 has_fbc:1;
	u8 has_pipe_cxsr:1;
	u8 has_hotplug:1;
	u8 cursor_needs_physical:1;
	u8 has_overlay:1;
	u8 overlay_needs_physical:1;
	u8 supports_tv:1;
	u8 has_bsd_ring:1;
	u8 has_blt_ring:1;
	u8 has_llc:1;
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC,
	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	unsigned long start;		/* Start offset of used GTT */
	size_t total;			/* Total size GTT can map */

	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;
	dma_addr_t scratch_page_dma;
	struct page *scratch_page;

	/* global gtt ops */
	void (*gtt_clear_range)(struct drm_device *dev,
				unsigned int first_entry,
				unsigned int num_entries);
	void (*gtt_insert_entries)(struct drm_device *dev,
				   struct sg_table *st,
				   unsigned int pg_start,
				   enum i915_cache_level cache_level);
};

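/*
 * Illustrative sketch, not taken from the original header: the two vfuncs
 * above let callers manipulate global GTT entries without knowing which
 * backend filled them in. Binding an object's pages would do, roughly:
 *
 *	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
 *					 obj->gtt_space->start >> PAGE_SHIFT,
 *					 cache_level);
 *
 * and unbinding would clear the same range of entries:
 *
 *	dev_priv->gtt.gtt_clear_range(dev,
 *				      obj->gtt_space->start >> PAGE_SHIFT,
 *				      obj->base.size >> PAGE_SHIFT);
 */
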
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
	struct drm_device *dev;
	unsigned num_pd_entries;
	struct page **pt_pages;
	uint32_t pd_offset;
	dma_addr_t *pt_dma_addr;
	dma_addr_t scratch_page_dma_addr;
};

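/*
 * Worked out from the constants above (not in the original header): a fully
 * populated PPGTT covers 512 page-directory entries * 1024 page-table entries
 * * 4 KiB pages = 2 GiB of per-process GPU virtual address space.
 */
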

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	int id;
	bool is_initialized;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
};

enum no_fbc_reason {
	FBC_NO_OUTPUT, /* no outputs enabled to compress */
	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
	FBC_MODE_TOO_LARGE, /* mode too large for compression */
	FBC_BAD_PLANE, /* fbc not supported on plane */
	FBC_NOT_TILED, /* buffer not tiled */
	FBC_MULTIPLE_PIPES, /* more than one pipe active */
	FBC_MODULE_PARAM,
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	struct work_struct work;
	u32 pm_iir;
	/* lock - irqsave spinlock that protects the work_struct and
	 * pm_iir. */
	spinlock_t lock;

	/* The below variables and all the rps hw state are protected by
	 * dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;

	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct intel_l3_parity {
	u32 *remap_info;
	struct work_struct error_work;
};

struct i915_gem_mm {
	/** Bridge to intel-gtt-ko */
	struct intel_gtt *gtt;
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Memory allocator for GTT */
	struct drm_mm gtt_space;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	int gtt_mtrr;

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int suspended;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	size_t object_memory;
	u32 object_count;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;
	int hangcheck_count;
	uint32_t last_acthd[I915_NUM_RINGS];
	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long last_reset;

	/**
	 * State variable and reset counter controlling the reset flow
	 *
	 * Upper bits are for the reset counter. This counter is used by the
	 * wait_seqno code to notice, race-free, that a reset event happened
	 * and that it needs to restart the entire ioctl (since most likely
	 * the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 *
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	 * that happens.
	 */
	atomic_t reset_counter;

	/**
	 * Special values/flags for reset_counter
	 *
	 * Note that the code relies on
	 * 	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
	 * being true.
	 */
#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			0xffffffff

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;
};

typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	void __iomem *regs;

	struct drm_i915_gt_funcs gt;
	/** gt_fifo_count and the subsequent register write are synchronized
	 * with dev->struct_mutex. */
	unsigned gt_fifo_count;
	/** forcewake_count is protected by gt_lock */
	unsigned forcewake_count;
	/** gt_lock is also taken in irq contexts. */
	spinlock_t gt_lock;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 pipestat[2];
	u32 irq_mask;
	u32 gt_irq_mask;

	u32 hotplug_supported_mask;
	struct work_struct hotplug_work;
	bool enable_hotplug_processing;

	int num_pipe;
	int num_pch_pll;

	unsigned long cfb_size;
	unsigned int cfb_fb;
	enum plane cfb_plane;
	int cfb_y;
	struct intel_fbc_work *fbc_work;

	struct intel_opregion opregion;

	/* overlay */
	struct intel_overlay *overlay;
	bool sprite_scaling_enabled;

	/* LVDS info */
	int backlight_level;  /* restore backlight to this value */
	bool backlight_enabled;
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits from the VBIOS */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;

		bool initialized;
		bool support;
		int bpp;
		struct edp_power_seq pps;
	} edp;
	bool no_aux_handshake;

	int crt_ddc_pin;
	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	/* Register state */
	bool modeset_on_lid;

	struct i915_gtt gtt;

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];
	/* indicate whether the LVDS_BORDER should be enabled or not */
	unsigned int lvds_border_bits;
	/* Panel fitter placement and size for Ironlake+ */
	u32 pch_pf_pos, pch_pf_size;

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	u16 orig_clock;
	int child_dev_num;
	struct child_device_config *child_dev;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	enum no_fbc_reason no_fbc_reason;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct i915_gpu_error gpu_error;

	/* list of fbdevs registered on this device */
	struct intel_fbdev *fbdev;

	/*
	 * The console may be contended at resume, but we don't
	 * want to block on it.
	 */
	struct work_struct console_resume_work;

	struct backlight_device *backlight;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;

	bool fdi_rx_polarity_reversed;

	struct i915_suspend_saved_registers regfile;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
} drm_i915_private_t;

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))

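/*
 * Illustrative sketch, not taken from the original header: for_each_ring
 * skips rings that were never initialised, so callers can iterate over all
 * ring slots unconditionally:
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		i915_gem_retire_requests_ring(ring);
 */
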
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
};

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;
	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head gtt_list;

	/** This object's place on the active/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on the
	 * inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:2;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

	struct sg_table *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * Current offset of the object in GTT space.
	 *
	 * This is the same as gtt_space->start
	 */
	uint32_t gtt_offset;

	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;

	/**
	 * Number of crtcs where this object is currently the fb, but
	 * will be page flipped away on the next vblank.  When it
	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
	 */
	atomic_t pending_flip;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

struct drm_i915_file_private {
	struct {
		spinlock_t lock;
		struct list_head request_list;
	} mm;
	struct idr context_idr;
};

#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev)		((dev)->pci_device == 0x3577)
#define IS_845G(dev)		((dev)->pci_device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
				 (dev)->pci_device == 0x0152 || \
				 (dev)->pci_device == 0x015a)
#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
				 (dev)->pci_device == 0x0106 || \
				 (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_ULT(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pci_device & 0xFF00) == 0x0A00)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)

#define HAS_BSD(dev)		(INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)

#define HAS_DDI(dev)		(IS_HASWELL(dev))

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00

#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)

#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))

#define GT_FREQUENCY_MULTIPLIER 50

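/*
 * Not in the original header, but a worked example of the constant above:
 * rps delay values are expressed in hardware units of
 * GT_FREQUENCY_MULTIPLIER MHz, so e.g. a cur_delay of 18 corresponds to
 * 18 * 50 = 900 MHz.
 */
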
#include "i915_trace.h"

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPU, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags define which states GPU is allowed
 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is deepest RC6. Their support by hardware varies according to the
 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
 * which brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)

extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;

extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

				/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);

/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);

void i915_error_state_free(struct kref *error_ref);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void intel_enable_asle(struct drm_device *dev);

#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
#else
#define i915_destroy_error_state(x)
#endif

/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);

int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     uint32_t alignment,
				     bool map_and_fenceable,
				     bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	struct scatterlist *sg = obj->pages->sgl;
	int nents = obj->pages->nents;
	while (nents > SG_MAX_SINGLE_ALLOC) {
		if (n < SG_MAX_SINGLE_ALLOC - 1)
			break;

		sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
		n -= SG_MAX_SINGLE_ALLOC - 1;
		nents -= SG_MAX_SINGLE_ALLOC - 1;
	}
	return sg_page(sg+n);
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}

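/*
 * Illustrative sketch, not taken from the original header: a caller that
 * needs the backing pages to stay resident (e.g. for CPU access via the sg
 * list) brackets its access with the pin helpers above:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	// ... use i915_gem_object_get_page(obj, n) as needed ...
 *	i915_gem_object_unpin_pages(obj);
 */
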
54cf91dc 1538int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
2911a35b
BW
1539int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1540 struct intel_ring_buffer *to);
54cf91dc 1541void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
9d773091 1542 struct intel_ring_buffer *ring);
54cf91dc 1543
ff72145b
DA
1544int i915_gem_dumb_create(struct drm_file *file_priv,
1545 struct drm_device *dev,
1546 struct drm_mode_create_dumb *args);
1547int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
1548 uint32_t handle, uint64_t *offset);
1549int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
0206e353 1550 uint32_t handle);
f787a5f5
CW
1551/**
1552 * Returns true if seq1 is later than seq2.
1553 */
1554static inline bool
1555i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1556{
1557 return (int32_t)(seq1 - seq2) >= 0;
1558}
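
/*
 * Hedged illustration (hypothetical helper, made-up constants): the signed
 * subtraction above keeps the ordering test correct across 32-bit seqno
 * wraparound.
 */
static inline bool i915_seqno_passed_example_wrap(void)
{
	/* 0x00000002 was emitted after 0xfffffffe once the counter wrapped,
	 * so this returns true even though 2 < 0xfffffffe numerically. */
	return i915_seqno_passed(0x00000002, 0xfffffffe);
}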
1559
fca26bb4
MK
1560int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
1561int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
06d98131 1562int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
d9e86c0e 1563int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
2021746e 1564
9a5a53b3 1565static inline bool
1690e1eb
CW
1566i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
1567{
1568 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1569 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1570 dev_priv->fence_regs[obj->fence_reg].pin_count++;
9a5a53b3
CW
1571 return true;
1572 } else
1573 return false;
1690e1eb
CW
1574}
1575
1576static inline void
1577i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1578{
1579 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1580 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1581 dev_priv->fence_regs[obj->fence_reg].pin_count--;
1582 }
1583}
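
/*
 * Hedged usage sketch (hypothetical helper): pin the fence while depending
 * on detiled access through the GTT, and release the pin as soon as that
 * access is complete so the fence register can be reused.
 */
static inline void
i915_gem_object_example_fenced_access(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_pin_fence(obj)) {
		/* ... access obj through its fenced GTT mapping here ... */
		i915_gem_object_unpin_fence(obj);
	}
}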
1584
b09a1fec 1585void i915_gem_retire_requests(struct drm_device *dev);
a71d8d94 1586void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
33196ded 1587int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
d6b2c790 1588 bool interruptible);
1f83fee0
DV
1589static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
1590{
1591 return unlikely(atomic_read(&error->reset_counter)
1592 & I915_RESET_IN_PROGRESS_FLAG);
1593}
1594
1595static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
1596{
1597 return atomic_read(&error->reset_counter) == I915_WEDGED;
1598}
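
/*
 * Hedged sketch of how a caller might map the two states to errnos, in the
 * spirit of i915_gem_check_wedge(): a terminally wedged GPU is fatal, while
 * an in-progress reset only asks the caller to back off and retry. The
 * helper name is hypothetical.
 */
static inline int i915_gpu_error_example_to_errno(struct i915_gpu_error *error)
{
	if (i915_terminally_wedged(error))
		return -EIO;		/* unrecoverable */
	if (i915_reset_in_progress(error))
		return -EAGAIN;		/* reset pending, retry later */
	return 0;
}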
a71d8d94 1599
069efc1d 1600void i915_gem_reset(struct drm_device *dev);
05394f39 1601void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
2021746e
CW
1602int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1603 uint32_t read_domains,
1604 uint32_t write_domain);
a8198eea 1605int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1070a42b 1606int __must_check i915_gem_init(struct drm_device *dev);
f691e2f4 1607int __must_check i915_gem_init_hw(struct drm_device *dev);
b9524a1e 1608void i915_gem_l3_remap(struct drm_device *dev);
f691e2f4 1609void i915_gem_init_swizzling(struct drm_device *dev);
e21af88d 1610void i915_gem_init_ppgtt(struct drm_device *dev);
79e53945 1611void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
b2da9fe5 1612int __must_check i915_gpu_idle(struct drm_device *dev);
2021746e 1613int __must_check i915_gem_idle(struct drm_device *dev);
3bb73aba
CW
1614int i915_add_request(struct intel_ring_buffer *ring,
1615 struct drm_file *file,
acb868d3 1616 u32 *seqno);
199b2bc2
BW
1617int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
1618 uint32_t seqno);
de151cf6 1619int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
2021746e
CW
1620int __must_check
1621i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1622 bool write);
1623int __must_check
dabdfe02
CW
1624i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
1625int __must_check
2da3b9b9
CW
1626i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1627 u32 alignment,
2021746e 1628 struct intel_ring_buffer *pipelined);
71acb5eb 1629int i915_gem_attach_phys_object(struct drm_device *dev,
05394f39 1630 struct drm_i915_gem_object *obj,
6eeefaf3
CW
1631 int id,
1632 int align);
71acb5eb 1633void i915_gem_detach_phys_object(struct drm_device *dev,
05394f39 1634 struct drm_i915_gem_object *obj);
71acb5eb 1635void i915_gem_free_all_phys_object(struct drm_device *dev);
05394f39 1636void i915_gem_release(struct drm_device *dev, struct drm_file *file);
673a394b 1637
0fa87796
ID
1638uint32_t
1639i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
467cffba 1640uint32_t
d865110c
ID
1641i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1642 int tiling_mode, bool fenced);
467cffba 1643
e4ffd173
CW
1644int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1645 enum i915_cache_level cache_level);
1646
1286ff73
DV
1647struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1648 struct dma_buf *dma_buf);
1649
1650struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
1651 struct drm_gem_object *gem_obj, int flags);
1652
254f965c
BW
1653/* i915_gem_context.c */
1654void i915_gem_context_init(struct drm_device *dev);
1655void i915_gem_context_fini(struct drm_device *dev);
254f965c 1656void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
e0556841
BW
1657int i915_switch_context(struct intel_ring_buffer *ring,
1658 struct drm_file *file, int to_id);
84624813
BW
1659int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1660 struct drm_file *file);
1661int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
1662 struct drm_file *file);
1286ff73 1663
76aaf220 1664/* i915_gem_gtt.c */
1d2a314c
DV
1665int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
1666void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
7bddb01f
DV
1667void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
1668 struct drm_i915_gem_object *obj,
1669 enum i915_cache_level cache_level);
1670void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
1671 struct drm_i915_gem_object *obj);
1d2a314c 1672
76aaf220 1673void i915_gem_restore_gtt_mappings(struct drm_device *dev);
74163907
DV
1674int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
1675void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
e4ffd173 1676 enum i915_cache_level cache_level);
05394f39 1677void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
74163907 1678void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
d7e5008f
BW
1679void i915_gem_init_global_gtt(struct drm_device *dev);
1680void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
1681 unsigned long mappable_end, unsigned long end);
e76e9aeb
BW
1682int i915_gem_gtt_init(struct drm_device *dev);
1683void i915_gem_gtt_fini(struct drm_device *dev);
d09105c6 1684static inline void i915_gem_chipset_flush(struct drm_device *dev)
e76e9aeb
BW
1685{
1686 if (INTEL_INFO(dev)->gen < 6)
1687 intel_gtt_chipset_flush();
1688}
1689
76aaf220 1690
b47eb4a2 1691/* i915_gem_evict.c */
2021746e 1692int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
42d6ab48
CW
1693 unsigned alignment,
1694 unsigned cache_level,
86a1ee26
CW
1695 bool mappable,
1696 bool nonblock);
6c085a72 1697int i915_gem_evict_everything(struct drm_device *dev);
b47eb4a2 1698
9797fbfb
CW
1699/* i915_gem_stolen.c */
1700int i915_gem_init_stolen(struct drm_device *dev);
11be49eb
CW
1701int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
1702void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
9797fbfb 1703void i915_gem_cleanup_stolen(struct drm_device *dev);
0104fdbb
CW
1704struct drm_i915_gem_object *
1705i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
1706void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
9797fbfb 1707
673a394b 1708/* i915_gem_tiling.c */
e9b73c67
CW
1709static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
1710{
1711 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1712
1713 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
1714 obj->tiling_mode != I915_TILING_NONE;
1715}
1716
673a394b 1717void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
05394f39
CW
1718void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
1719void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
673a394b
EA
1720
1721/* i915_gem_debug.c */
05394f39 1722void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
673a394b 1723 const char *where, uint32_t mark);
23bc5982
CW
1724#if WATCH_LISTS
1725int i915_verify_lists(struct drm_device *dev);
673a394b 1726#else
23bc5982 1727#define i915_verify_lists(dev) 0
673a394b 1728#endif
05394f39
CW
1729void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
1730 int handle);
1da177e4 1733
2017263e 1734/* i915_debugfs.c */
27c202ad
BG
1735int i915_debugfs_init(struct drm_minor *minor);
1736void i915_debugfs_cleanup(struct drm_minor *minor);
2017263e 1737
317c35d1
JB
1738/* i915_suspend.c */
1739extern int i915_save_state(struct drm_device *dev);
1740extern int i915_restore_state(struct drm_device *dev);
0a3e67a4 1741
d8157a36
DV
1742/* i915_ums.c */
1743void i915_save_display_reg(struct drm_device *dev);
1744void i915_restore_display_reg(struct drm_device *dev);
317c35d1 1745
0136db58
BW
1746/* i915_sysfs.c */
1747void i915_setup_sysfs(struct drm_device *dev_priv);
1748void i915_teardown_sysfs(struct drm_device *dev_priv);
1749
f899fc64
CW
1750/* intel_i2c.c */
1751extern int intel_setup_gmbus(struct drm_device *dev);
1752extern void intel_teardown_gmbus(struct drm_device *dev);
3bd7d909
DK
1753static inline bool intel_gmbus_is_port_valid(unsigned port)
1754{
2ed06c93 1755 return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
3bd7d909
DK
1756}
1757
1758extern struct i2c_adapter *intel_gmbus_get_adapter(
1759 struct drm_i915_private *dev_priv, unsigned port);
e957d772
CW
1760extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
1761extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
b8232e90
CW
1762static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
1763{
1764 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
1765}
f899fc64
CW
1766extern void intel_i2c_reset(struct drm_device *dev);
1767
3b617967 1768/* intel_opregion.c */
44834a67
CW
1769extern int intel_opregion_setup(struct drm_device *dev);
1770#ifdef CONFIG_ACPI
1771extern void intel_opregion_init(struct drm_device *dev);
1772extern void intel_opregion_fini(struct drm_device *dev);
3b617967
CW
1773extern void intel_opregion_asle_intr(struct drm_device *dev);
1774extern void intel_opregion_gse_intr(struct drm_device *dev);
1775extern void intel_opregion_enable_asle(struct drm_device *dev);
65e082c9 1776#else
44834a67
CW
1777static inline void intel_opregion_init(struct drm_device *dev) { return; }
1778static inline void intel_opregion_fini(struct drm_device *dev) { return; }
3b617967
CW
1779static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
1780static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
1781static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
65e082c9 1782#endif
8ee1c3db 1783
723bfd70
JB
1784/* intel_acpi.c */
1785#ifdef CONFIG_ACPI
1786extern void intel_register_dsm_handler(void);
1787extern void intel_unregister_dsm_handler(void);
1788#else
1789static inline void intel_register_dsm_handler(void) { return; }
1790static inline void intel_unregister_dsm_handler(void) { return; }
1791#endif /* CONFIG_ACPI */
1792
79e53945 1793/* modesetting */
f817586c 1794extern void intel_modeset_init_hw(struct drm_device *dev);
79e53945 1795extern void intel_modeset_init(struct drm_device *dev);
2c7111db 1796extern void intel_modeset_gem_init(struct drm_device *dev);
79e53945 1797extern void intel_modeset_cleanup(struct drm_device *dev);
28d52043 1798extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
45e2b5f6
DV
1799extern void intel_modeset_setup_hw_state(struct drm_device *dev,
1800 bool force_restore);
44cec740 1801extern void i915_redisable_vga(struct drm_device *dev);
ee5382ae 1802extern bool intel_fbc_enabled(struct drm_device *dev);
43a9539f 1803extern void intel_disable_fbc(struct drm_device *dev);
7648fa99 1804extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
dde86e2d 1805extern void intel_init_pch_refclk(struct drm_device *dev);
3b8d8d91 1806extern void gen6_set_rps(struct drm_device *dev, u8 val);
0206e353
AJ
1807extern void intel_detect_pch(struct drm_device *dev);
1808extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
0136db58 1809extern int intel_enable_rc6(const struct drm_device *dev);
3bad0781 1810
2911a35b 1811extern bool i915_semaphore_is_enabled(struct drm_device *dev);
c0c7babc
BW
1812int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1813 struct drm_file *file);
575155a9 1814
6ef3d427 1815/* overlay */
3bd3c932 1816#ifdef CONFIG_DEBUG_FS
6ef3d427
CW
1817extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
1818extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
c4a1d9e4
CW
1819
1820extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
1821extern void intel_display_print_error_state(struct seq_file *m,
1822 struct drm_device *dev,
1823 struct intel_display_error_state *error);
3bd3c932 1824#endif
6ef3d427 1825
b7287d80
BW
1826/* On the SNB platform, the forcewake bit must be set before reading ring
1827 * registers to prevent the GT core from powering down and returning stale
1828 * values.
1829 */
fcca7926
BW
1830void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1831void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
67a3744f 1832int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
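
/*
 * Hedged usage sketch (hypothetical helper): GT register reads on SNB are
 * bracketed by a forcewake reference so the hardware cannot power down
 * mid-sequence and hand back stale values.
 */
static inline void gen6_example_forcewake_section(struct drm_i915_private *dev_priv)
{
	gen6_gt_force_wake_get(dev_priv);	/* keep the GT awake */
	/* ... read ring/GT registers here; values are guaranteed fresh ... */
	gen6_gt_force_wake_put(dev_priv);	/* drop the wake reference */
}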
b7287d80 1833
42c0526c
BW
1834int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
1835int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
1836
5f75377d 1837#define __i915_read(x, y) \
f7000883 1838 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
fcca7926 1839
5f75377d
KP
1840__i915_read(8, b)
1841__i915_read(16, w)
1842__i915_read(32, l)
1843__i915_read(64, q)
1844#undef __i915_read
1845
1846#define __i915_write(x, y) \
f7000883
AK
1847 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
1848
5f75377d
KP
1849__i915_write(8, b)
1850__i915_write(16, w)
1851__i915_write(32, l)
1852__i915_write(64, q)
1853#undef __i915_write
1854
1855#define I915_READ8(reg) i915_read8(dev_priv, (reg))
1856#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
1857
1858#define I915_READ16(reg) i915_read16(dev_priv, (reg))
1859#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
1860#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
1861#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
1862
1863#define I915_READ(reg) i915_read32(dev_priv, (reg))
1864#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
cae5852d
ZN
1865#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
1866#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
5f75377d
KP
1867
1868#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
1869#define I915_READ64(reg) i915_read64(dev_priv, (reg))
cae5852d
ZN
1870
1871#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
1872#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
1873
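
/*
 * Hedged usage sketch (hypothetical helper, placeholder register offset):
 * a posting read after a register write flushes the write past any
 * buffering before the driver relies on its side effects.
 */
static inline void i915_example_write_and_flush(struct drm_i915_private *dev_priv,
						u32 reg, u32 val)
{
	I915_WRITE(reg, val);	/* traced MMIO write */
	POSTING_READ(reg);	/* untraced read-back forces the write to complete */
}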
55bc60db
VS
1874/* "Broadcast RGB" property */
1875#define INTEL_BROADCAST_RGB_AUTO 0
1876#define INTEL_BROADCAST_RGB_FULL 1
1877#define INTEL_BROADCAST_RGB_LIMITED 2
ba4f01a3 1878
1da177e4 1879#endif