drivers/gpu/drm/i915/i915_drv.h (blame view, mirror_ubuntu-bionic-kernel.git, at "drm/i915: Restore nonblocking awaits for modesetting")
1da177e4
LT
1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2 */
0d6aa60b 3/*
bc54fd1a 4 *
1da177e4
LT
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
bc54fd1a
DA
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
0d6aa60b 28 */
1da177e4
LT
29
30#ifndef _I915_DRV_H_
31#define _I915_DRV_H_
32
e9b73c67 33#include <uapi/drm/i915_drm.h>
93b81f51 34#include <uapi/drm/drm_fourcc.h>
e9b73c67 35
0839ccb8 36#include <linux/io-mapping.h>
f899fc64 37#include <linux/i2c.h>
c167a6fc 38#include <linux/i2c-algo-bit.h>
aaa6fd2a 39#include <linux/backlight.h>
5cc9ed4b 40#include <linux/hashtable.h>
2911a35b 41#include <linux/intel-iommu.h>
742cbee8 42#include <linux/kref.h>
9ee32fea 43#include <linux/pm_qos.h>
d07f0e59 44#include <linux/reservation.h>
e73bdd20
CW
45#include <linux/shmem_fs.h>
46
47#include <drm/drmP.h>
48#include <drm/intel-gtt.h>
49#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
50#include <drm/drm_gem.h>
3b96a0b1 51#include <drm/drm_auth.h>
e73bdd20
CW
52
53#include "i915_params.h"
54#include "i915_reg.h"
55
56#include "intel_bios.h"
ac7f11c6 57#include "intel_dpll_mgr.h"
e73bdd20
CW
58#include "intel_guc.h"
59#include "intel_lrc.h"
60#include "intel_ringbuffer.h"
61
d501b1d2 62#include "i915_gem.h"
e73bdd20
CW
63#include "i915_gem_gtt.h"
64#include "i915_gem_render_state.h"
05235c53 65#include "i915_gem_request.h"
585fb111 66
0ad35fed
ZW
67#include "intel_gvt.h"
68
1da177e4
LT
69/* General customization:
70 */
71
1da177e4
LT
72#define DRIVER_NAME "i915"
73#define DRIVER_DESC "Intel Graphics"
9558e74c
DV
74#define DRIVER_DATE "20161024"
75#define DRIVER_TIMESTAMP 1477290335
1da177e4 76
c883ef1b 77#undef WARN_ON
5f77eeb0
DV
78/* Many gcc versions seem to not see through this and fall over :( */
79#if 0
80#define WARN_ON(x) ({ \
81 bool __i915_warn_cond = (x); \
82 if (__builtin_constant_p(__i915_warn_cond)) \
83 BUILD_BUG_ON(__i915_warn_cond); \
84 WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
85#else
152b2262 86#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
5f77eeb0
DV
87#endif
88
cd9bfacb 89#undef WARN_ON_ONCE
152b2262 90#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
cd9bfacb 91
5f77eeb0
DV
92#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
93 (long) (x), __func__);
c883ef1b 94
e2c719b7
RC
95/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
96 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
97 * which may not necessarily be a user visible problem. This will either
 98 * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam, to
99 * enable distros and users to tailor their preferred amount of i915 abrt
100 * spam.
101 */
102#define I915_STATE_WARN(condition, format...) ({ \
103 int __ret_warn_on = !!(condition); \
32753cb8
JL
104 if (unlikely(__ret_warn_on)) \
105 if (!WARN(i915.verbose_state_checks, format)) \
e2c719b7 106 DRM_ERROR(format); \
e2c719b7
RC
107 unlikely(__ret_warn_on); \
108})
109
152b2262
JL
110#define I915_STATE_WARN_ON(x) \
111 I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
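/*
 * Illustrative sketch (not in the original header): a typical hw state
 * check in modeset code would look roughly like the following, where
 * "cur_state" and "state" are hypothetical locals read back from hw:
 *
 *	I915_STATE_WARN(cur_state != state,
 *			"pipe %c state mismatch (expected %s, found %s)\n",
 *			pipe_name(pipe), onoff(state), onoff(cur_state));
 *	I915_STATE_WARN_ON(!crtc->active);
 */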
c883ef1b 112
4fec15d1
ID
113bool __i915_inject_load_failure(const char *func, int line);
114#define i915_inject_load_failure() \
115 __i915_inject_load_failure(__func__, __LINE__)
116
42a8ca4c
JN
117static inline const char *yesno(bool v)
118{
119 return v ? "yes" : "no";
120}
121
87ad3212
JN
122static inline const char *onoff(bool v)
123{
124 return v ? "on" : "off";
125}
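/*
 * Sketch (not in the original header): yesno()/onoff() are just string
 * helpers for log/debugfs output, e.g.:
 *
 *	DRM_DEBUG_KMS("FBC: %s, CxSR: %s\n", yesno(fbc_enabled), onoff(cxsr));
 */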
126
317c35d1 127enum pipe {
752aa88a 128 INVALID_PIPE = -1,
317c35d1
JB
129 PIPE_A = 0,
130 PIPE_B,
9db4a9c7 131 PIPE_C,
a57c774a
AK
132 _PIPE_EDP,
133 I915_MAX_PIPES = _PIPE_EDP
317c35d1 134};
9db4a9c7 135#define pipe_name(p) ((p) + 'A')
317c35d1 136
a5c961d1
PZ
137enum transcoder {
138 TRANSCODER_A = 0,
139 TRANSCODER_B,
140 TRANSCODER_C,
a57c774a 141 TRANSCODER_EDP,
4d1de975
JN
142 TRANSCODER_DSI_A,
143 TRANSCODER_DSI_C,
a57c774a 144 I915_MAX_TRANSCODERS
a5c961d1 145};
da205630
JN
146
147static inline const char *transcoder_name(enum transcoder transcoder)
148{
149 switch (transcoder) {
150 case TRANSCODER_A:
151 return "A";
152 case TRANSCODER_B:
153 return "B";
154 case TRANSCODER_C:
155 return "C";
156 case TRANSCODER_EDP:
157 return "EDP";
4d1de975
JN
158 case TRANSCODER_DSI_A:
159 return "DSI A";
160 case TRANSCODER_DSI_C:
161 return "DSI C";
da205630
JN
162 default:
163 return "<invalid>";
164 }
165}
a5c961d1 166
4d1de975
JN
167static inline bool transcoder_is_dsi(enum transcoder transcoder)
168{
169 return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
170}
171
84139d1e 172/*
31409e97
MR
173 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
174 * number of planes per CRTC. Not all platforms really have this many planes,
175 * which means some arrays of size I915_MAX_PLANES may have unused entries
176 * between the topmost sprite plane and the cursor plane.
84139d1e 177 */
80824003
JB
178enum plane {
179 PLANE_A = 0,
180 PLANE_B,
9db4a9c7 181 PLANE_C,
31409e97
MR
182 PLANE_CURSOR,
183 I915_MAX_PLANES,
80824003 184};
9db4a9c7 185#define plane_name(p) ((p) + 'A')
52440211 186
d615a166 187#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
06da8da2 188
2b139522 189enum port {
03cdc1d4 190 PORT_NONE = -1,
2b139522
ED
191 PORT_A = 0,
192 PORT_B,
193 PORT_C,
194 PORT_D,
195 PORT_E,
196 I915_MAX_PORTS
197};
198#define port_name(p) ((p) + 'A')
199
a09caddd 200#define I915_NUM_PHYS_VLV 2
e4607fcf
CML
201
202enum dpio_channel {
203 DPIO_CH0,
204 DPIO_CH1
205};
206
207enum dpio_phy {
208 DPIO_PHY0,
209 DPIO_PHY1
210};
211
b97186f0
PZ
212enum intel_display_power_domain {
213 POWER_DOMAIN_PIPE_A,
214 POWER_DOMAIN_PIPE_B,
215 POWER_DOMAIN_PIPE_C,
216 POWER_DOMAIN_PIPE_A_PANEL_FITTER,
217 POWER_DOMAIN_PIPE_B_PANEL_FITTER,
218 POWER_DOMAIN_PIPE_C_PANEL_FITTER,
219 POWER_DOMAIN_TRANSCODER_A,
220 POWER_DOMAIN_TRANSCODER_B,
221 POWER_DOMAIN_TRANSCODER_C,
f52e353e 222 POWER_DOMAIN_TRANSCODER_EDP,
4d1de975
JN
223 POWER_DOMAIN_TRANSCODER_DSI_A,
224 POWER_DOMAIN_TRANSCODER_DSI_C,
6331a704
PJ
225 POWER_DOMAIN_PORT_DDI_A_LANES,
226 POWER_DOMAIN_PORT_DDI_B_LANES,
227 POWER_DOMAIN_PORT_DDI_C_LANES,
228 POWER_DOMAIN_PORT_DDI_D_LANES,
229 POWER_DOMAIN_PORT_DDI_E_LANES,
319be8ae
ID
230 POWER_DOMAIN_PORT_DSI,
231 POWER_DOMAIN_PORT_CRT,
232 POWER_DOMAIN_PORT_OTHER,
cdf8dd7f 233 POWER_DOMAIN_VGA,
fbeeaa23 234 POWER_DOMAIN_AUDIO,
bd2bb1b9 235 POWER_DOMAIN_PLLS,
1407121a
S
236 POWER_DOMAIN_AUX_A,
237 POWER_DOMAIN_AUX_B,
238 POWER_DOMAIN_AUX_C,
239 POWER_DOMAIN_AUX_D,
f0ab43e6 240 POWER_DOMAIN_GMBUS,
dfa57627 241 POWER_DOMAIN_MODESET,
baa70707 242 POWER_DOMAIN_INIT,
bddc7645
ID
243
244 POWER_DOMAIN_NUM,
b97186f0
PZ
245};
246
247#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
248#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
249 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
f52e353e
ID
250#define POWER_DOMAIN_TRANSCODER(tran) \
251 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
252 (tran) + POWER_DOMAIN_TRANSCODER_A)
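/*
 * Worked example (sketch, not in the original header): the pipe/transcoder
 * power domain macros rely on the enums above being laid out in matching
 * order, e.g. POWER_DOMAIN_PIPE(PIPE_B) == POWER_DOMAIN_PIPE_B and
 * POWER_DOMAIN_TRANSCODER(TRANSCODER_C) == POWER_DOMAIN_TRANSCODER_C,
 * while TRANSCODER_EDP is special-cased to POWER_DOMAIN_TRANSCODER_EDP.
 */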
b97186f0 253
1d843f9d
EE
254enum hpd_pin {
255 HPD_NONE = 0,
1d843f9d
EE
256 HPD_TV = HPD_NONE, /* TV is known to be unreliable */
257 HPD_CRT,
258 HPD_SDVO_B,
259 HPD_SDVO_C,
cc24fcdc 260 HPD_PORT_A,
1d843f9d
EE
261 HPD_PORT_B,
262 HPD_PORT_C,
263 HPD_PORT_D,
26951caf 264 HPD_PORT_E,
1d843f9d
EE
265 HPD_NUM_PINS
266};
267
c91711f9
JN
268#define for_each_hpd_pin(__pin) \
269 for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
270
5fcece80
JN
271struct i915_hotplug {
272 struct work_struct hotplug_work;
273
274 struct {
275 unsigned long last_jiffies;
276 int count;
277 enum {
278 HPD_ENABLED = 0,
279 HPD_DISABLED = 1,
280 HPD_MARK_DISABLED = 2
281 } state;
282 } stats[HPD_NUM_PINS];
283 u32 event_bits;
284 struct delayed_work reenable_work;
285
286 struct intel_digital_port *irq_port[I915_MAX_PORTS];
287 u32 long_port_mask;
288 u32 short_port_mask;
289 struct work_struct dig_port_work;
290
19625e85
L
291 struct work_struct poll_init_work;
292 bool poll_enabled;
293
5fcece80
JN
294 /*
 295	 * If we get an HPD irq from DP and an HPD irq from non-DP,
 296	 * the non-DP HPD work could block the workqueue while trying to
 297	 * acquire the mode config mutex, which userspace may already hold.
 298	 * However, userspace is waiting on the DP workqueue to run, which
 299	 * is blocked behind the non-DP one.
300 */
301 struct workqueue_struct *dp_wq;
302};
303
2a2d5482
CW
304#define I915_GEM_GPU_DOMAINS \
305 (I915_GEM_DOMAIN_RENDER | \
306 I915_GEM_DOMAIN_SAMPLER | \
307 I915_GEM_DOMAIN_COMMAND | \
308 I915_GEM_DOMAIN_INSTRUCTION | \
309 I915_GEM_DOMAIN_VERTEX)
62fdfeaf 310
055e393f
DL
311#define for_each_pipe(__dev_priv, __p) \
312 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
6831f3e3
VS
313#define for_each_pipe_masked(__dev_priv, __p, __mask) \
314 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
315 for_each_if ((__mask) & (1 << (__p)))
8b364b41 316#define for_each_universal_plane(__dev_priv, __pipe, __p) \
dd740780
DL
317 for ((__p) = 0; \
318 (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
319 (__p)++)
3bdcfc0c
DL
320#define for_each_sprite(__dev_priv, __p, __s) \
321 for ((__s) = 0; \
322 (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
323 (__s)++)
9db4a9c7 324
c3aeadc8
JN
325#define for_each_port_masked(__port, __ports_mask) \
326 for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
327 for_each_if ((__ports_mask) & (1 << (__port)))
328
d79b814d 329#define for_each_crtc(dev, crtc) \
91c8a326 330 list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
d79b814d 331
27321ae8
ML
332#define for_each_intel_plane(dev, intel_plane) \
333 list_for_each_entry(intel_plane, \
91c8a326 334 &(dev)->mode_config.plane_list, \
27321ae8
ML
335 base.head)
336
c107acfe 337#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
91c8a326
CW
338 list_for_each_entry(intel_plane, \
339 &(dev)->mode_config.plane_list, \
c107acfe
MR
340 base.head) \
341 for_each_if ((plane_mask) & \
342 (1 << drm_plane_index(&intel_plane->base)))
343
262cd2e1
VS
344#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
345 list_for_each_entry(intel_plane, \
346 &(dev)->mode_config.plane_list, \
347 base.head) \
95150bdf 348 for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
262cd2e1 349
91c8a326
CW
350#define for_each_intel_crtc(dev, intel_crtc) \
351 list_for_each_entry(intel_crtc, \
352 &(dev)->mode_config.crtc_list, \
353 base.head)
d063ae48 354
91c8a326
CW
355#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
356 list_for_each_entry(intel_crtc, \
357 &(dev)->mode_config.crtc_list, \
358 base.head) \
98d39494
MR
359 for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
360
b2784e15
DL
361#define for_each_intel_encoder(dev, intel_encoder) \
362 list_for_each_entry(intel_encoder, \
363 &(dev)->mode_config.encoder_list, \
364 base.head)
365
3a3371ff
ACO
366#define for_each_intel_connector(dev, intel_connector) \
367 list_for_each_entry(intel_connector, \
91c8a326 368 &(dev)->mode_config.connector_list, \
3a3371ff
ACO
369 base.head)
370
6c2b7c12
DV
371#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
372 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
95150bdf 373 for_each_if ((intel_encoder)->base.crtc == (__crtc))
6c2b7c12 374
53f5e3ca
JB
375#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
376 list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
95150bdf 377 for_each_if ((intel_connector)->base.encoder == (__encoder))
53f5e3ca 378
b04c5bd6
BF
379#define for_each_power_domain(domain, mask) \
380 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
95150bdf 381 for_each_if ((1 << (domain)) & (mask))
b04c5bd6 382
e7b903d2 383struct drm_i915_private;
ad46cb53 384struct i915_mm_struct;
5cc9ed4b 385struct i915_mmu_object;
e7b903d2 386
a6f766f3
CW
387struct drm_i915_file_private {
388 struct drm_i915_private *dev_priv;
389 struct drm_file *file;
390
391 struct {
392 spinlock_t lock;
393 struct list_head request_list;
d0bc54f2
CW
394/* 20ms is a fairly arbitrary limit (greater than the average frame time)
395 * chosen to prevent the CPU getting more than a frame ahead of the GPU
396 * (when using lax throttling for the frontbuffer). We also use it to
397 * offer free GPU waitboosts for severely congested workloads.
398 */
399#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
a6f766f3
CW
400 } mm;
401 struct idr context_idr;
402
2e1b8730
CW
403 struct intel_rps_client {
404 struct list_head link;
405 unsigned boosts;
406 } rps;
a6f766f3 407
c80ff16e 408 unsigned int bsd_engine;
a6f766f3
CW
409};
410
e69d0bc1
DV
411/* Used by dp and fdi links */
412struct intel_link_m_n {
413 uint32_t tu;
414 uint32_t gmch_m;
415 uint32_t gmch_n;
416 uint32_t link_m;
417 uint32_t link_n;
418};
419
420void intel_link_compute_m_n(int bpp, int nlanes,
421 int pixel_clock, int link_clock,
422 struct intel_link_m_n *m_n);
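/*
 * Sketch (assumption, not from this header): a DP encoder would typically
 * fill in an intel_link_m_n with something like
 *
 *	intel_link_compute_m_n(bpp, lane_count,
 *			       adjusted_mode->crtc_clock, port_clock,
 *			       &pipe_config->dp_m_n);
 *
 * where bpp, lane_count and port_clock come from link/bandwidth computation.
 */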
423
1da177e4
LT
424/* Interface history:
425 *
426 * 1.1: Original.
0d6aa60b
DA
427 * 1.2: Add Power Management
428 * 1.3: Add vblank support
de227f5f 429 * 1.4: Fix cmdbuffer path, add heap destroy
702880f2 430 * 1.5: Add vblank pipe configuration
2228ed67
MD
431 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
432 * - Support vertical blank on secondary display pipe
1da177e4
LT
433 */
434#define DRIVER_MAJOR 1
2228ed67 435#define DRIVER_MINOR 6
1da177e4
LT
436#define DRIVER_PATCHLEVEL 0
437
0a3e67a4
JB
438struct opregion_header;
439struct opregion_acpi;
440struct opregion_swsci;
441struct opregion_asle;
442
8ee1c3db 443struct intel_opregion {
115719fc
WD
444 struct opregion_header *header;
445 struct opregion_acpi *acpi;
446 struct opregion_swsci *swsci;
ebde53c7
JN
447 u32 swsci_gbda_sub_functions;
448 u32 swsci_sbcb_sub_functions;
115719fc 449 struct opregion_asle *asle;
04ebaadb 450 void *rvda;
82730385 451 const void *vbt;
ada8f955 452 u32 vbt_size;
115719fc 453 u32 *lid_state;
91a60f20 454 struct work_struct asle_work;
8ee1c3db 455};
44834a67 456#define OPREGION_SIZE (8*1024)
8ee1c3db 457
6ef3d427
CW
458struct intel_overlay;
459struct intel_overlay_error_state;
460
de151cf6 461struct drm_i915_fence_reg {
a1e5afbe 462 struct list_head link;
49ef5294
CW
463 struct drm_i915_private *i915;
464 struct i915_vma *vma;
1690e1eb 465 int pin_count;
49ef5294
CW
466 int id;
467 /**
468 * Whether the tiling parameters for the currently
469 * associated fence register have changed. Note that
470 * for the purposes of tracking tiling changes we also
471 * treat the unfenced register, the register slot that
472 * the object occupies whilst it executes a fenced
473 * command (such as BLT on gen2/3), as a "fence".
474 */
475 bool dirty;
de151cf6 476};
7c1c2871 477
9b9d172d 478struct sdvo_device_mapping {
e957d772 479 u8 initialized;
9b9d172d 480 u8 dvo_port;
481 u8 slave_addr;
482 u8 dvo_wiring;
e957d772 483 u8 i2c_pin;
b1083333 484 u8 ddc_pin;
9b9d172d 485};
486
7bd688cd 487struct intel_connector;
820d2d77 488struct intel_encoder;
5cec258b 489struct intel_crtc_state;
5724dbd1 490struct intel_initial_plane_config;
0e8ffe1b 491struct intel_crtc;
ee9300bb
DV
492struct intel_limit;
493struct dpll;
b8cecdf5 494
e70236a8 495struct drm_i915_display_funcs {
e70236a8
JB
496 int (*get_display_clock_speed)(struct drm_device *dev);
497 int (*get_fifo_size)(struct drm_device *dev, int plane);
e3bddded 498 int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
ed4a6a7c
MR
499 int (*compute_intermediate_wm)(struct drm_device *dev,
500 struct intel_crtc *intel_crtc,
501 struct intel_crtc_state *newstate);
502 void (*initial_watermarks)(struct intel_crtc_state *cstate);
503 void (*optimize_watermarks)(struct intel_crtc_state *cstate);
98d39494 504 int (*compute_global_watermarks)(struct drm_atomic_state *state);
46ba614c 505 void (*update_wm)(struct drm_crtc *crtc);
27c329ed
ML
506 int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
507 void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
0e8ffe1b
DV
508 /* Returns the active state of the crtc, and if the crtc is active,
509 * fills out the pipe-config with the hw state. */
510 bool (*get_pipe_config)(struct intel_crtc *,
5cec258b 511 struct intel_crtc_state *);
5724dbd1
DL
512 void (*get_initial_plane_config)(struct intel_crtc *,
513 struct intel_initial_plane_config *);
190f68c5
ACO
514 int (*crtc_compute_clock)(struct intel_crtc *crtc,
515 struct intel_crtc_state *crtc_state);
4a806558
ML
516 void (*crtc_enable)(struct intel_crtc_state *pipe_config,
517 struct drm_atomic_state *old_state);
518 void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
519 struct drm_atomic_state *old_state);
896e5bb0
L
520 void (*update_crtcs)(struct drm_atomic_state *state,
521 unsigned int *crtc_vblank_mask);
69bfe1a9
JN
522 void (*audio_codec_enable)(struct drm_connector *connector,
523 struct intel_encoder *encoder,
5e7234c9 524 const struct drm_display_mode *adjusted_mode);
69bfe1a9 525 void (*audio_codec_disable)(struct intel_encoder *encoder);
674cf967 526 void (*fdi_link_train)(struct drm_crtc *crtc);
6067aaea 527 void (*init_clock_gating)(struct drm_device *dev);
5a21b665
DV
528 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
529 struct drm_framebuffer *fb,
530 struct drm_i915_gem_object *obj,
531 struct drm_i915_gem_request *req,
532 uint32_t flags);
91d14251 533 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
e70236a8
JB
534 /* clock updates for mode set */
535 /* cursor updates */
536 /* render clock increase/decrease */
537 /* display clock increase/decrease */
538 /* pll clock increase/decrease */
8563b1e8 539
b95c5321
ML
540 void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
541 void (*load_luts)(struct drm_crtc_state *crtc_state);
e70236a8
JB
542};
543
48c1026a
MK
544enum forcewake_domain_id {
545 FW_DOMAIN_ID_RENDER = 0,
546 FW_DOMAIN_ID_BLITTER,
547 FW_DOMAIN_ID_MEDIA,
548
549 FW_DOMAIN_ID_COUNT
550};
551
552enum forcewake_domains {
553 FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
554 FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
555 FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
556 FORCEWAKE_ALL = (FORCEWAKE_RENDER |
557 FORCEWAKE_BLITTER |
558 FORCEWAKE_MEDIA)
559};
560
3756685a
TU
561#define FW_REG_READ (1)
562#define FW_REG_WRITE (2)
563
564enum forcewake_domains
565intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
566 i915_reg_t reg, unsigned int op);
567
907b28c5 568struct intel_uncore_funcs {
c8d9a590 569 void (*force_wake_get)(struct drm_i915_private *dev_priv,
48c1026a 570 enum forcewake_domains domains);
c8d9a590 571 void (*force_wake_put)(struct drm_i915_private *dev_priv,
48c1026a 572 enum forcewake_domains domains);
0b274481 573
f0f59a00
VS
574 uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
575 uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
576 uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
577 uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
0b274481 578
f0f59a00 579 void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
0b274481 580 uint8_t val, bool trace);
f0f59a00 581 void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
0b274481 582 uint16_t val, bool trace);
f0f59a00 583 void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
0b274481 584 uint32_t val, bool trace);
990bbdad
CW
585};
586
15157970
TU
587struct intel_forcewake_range {
588 u32 start;
589 u32 end;
590
591 enum forcewake_domains domains;
592};
593
907b28c5
CW
594struct intel_uncore {
595 spinlock_t lock; /** lock is also taken in irq contexts. */
596
15157970
TU
597 const struct intel_forcewake_range *fw_domains_table;
598 unsigned int fw_domains_table_entries;
599
907b28c5
CW
600 struct intel_uncore_funcs funcs;
601
602 unsigned fifo_count;
003342a5 603
48c1026a 604 enum forcewake_domains fw_domains;
003342a5 605 enum forcewake_domains fw_domains_active;
b2cff0db
CW
606
607 struct intel_uncore_forcewake_domain {
608 struct drm_i915_private *i915;
48c1026a 609 enum forcewake_domain_id id;
33c582c1 610 enum forcewake_domains mask;
b2cff0db 611 unsigned wake_count;
a57a4a67 612 struct hrtimer timer;
f0f59a00 613 i915_reg_t reg_set;
05a2fb15
MK
614 u32 val_set;
615 u32 val_clear;
f0f59a00
VS
616 i915_reg_t reg_ack;
617 i915_reg_t reg_post;
05a2fb15 618 u32 val_reset;
b2cff0db 619 } fw_domain[FW_DOMAIN_ID_COUNT];
75714940
MK
620
621 int unclaimed_mmio_check;
b2cff0db
CW
622};
623
624/* Iterate over initialised fw domains */
33c582c1
TU
625#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
626 for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
627 (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
628 (domain__)++) \
629 for_each_if ((mask__) & (domain__)->mask)
630
631#define for_each_fw_domain(domain__, dev_priv__) \
632 for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
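/*
 * Usage sketch (not in the original header): forcewake code iterates the
 * initialised domains much like the pipe iterators above, e.g.:
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *
 *	for_each_fw_domain_masked(domain, FORCEWAKE_RENDER, dev_priv)
 *		domain->wake_count++;
 */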
907b28c5 633
b6e7d894
DL
634#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
635#define CSR_VERSION_MAJOR(version) ((version) >> 16)
636#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
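/*
 * Worked example (not in the original header): CSR_VERSION(1, 23) packs the
 * major into the upper and the minor into the lower 16 bits, i.e. 0x10017,
 * and CSR_VERSION_MAJOR()/CSR_VERSION_MINOR() recover 1 and 23 again.
 */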
637
eb805623 638struct intel_csr {
8144ac59 639 struct work_struct work;
eb805623 640 const char *fw_path;
a7f749f9 641 uint32_t *dmc_payload;
eb805623 642 uint32_t dmc_fw_size;
b6e7d894 643 uint32_t version;
eb805623 644 uint32_t mmio_count;
f0f59a00 645 i915_reg_t mmioaddr[8];
eb805623 646 uint32_t mmiodata[8];
832dba88 647 uint32_t dc_state;
a37baf3b 648 uint32_t allowed_dc_mask;
eb805623
DV
649};
650
604db650 651#define DEV_INFO_FOR_EACH_FLAG(func) \
566c56a4 652 /* Keep is_* in chronological order */ \
604db650
JL
653 func(is_mobile); \
654 func(is_i85x); \
655 func(is_i915g); \
656 func(is_i945gm); \
657 func(is_g33); \
604db650
JL
658 func(is_g4x); \
659 func(is_pineview); \
660 func(is_broadwater); \
661 func(is_crestline); \
662 func(is_ivybridge); \
663 func(is_valleyview); \
664 func(is_cherryview); \
665 func(is_haswell); \
666 func(is_broadwell); \
667 func(is_skylake); \
668 func(is_broxton); \
669 func(is_kabylake); \
670 func(is_preliminary); \
566c56a4 671 /* Keep has_* in alphabetical order */ \
604db650 672 func(has_csr); \
566c56a4 673 func(has_ddi); \
604db650 674 func(has_dp_mst); \
566c56a4
JL
675 func(has_fbc); \
676 func(has_fpga_dbg); \
604db650 677 func(has_gmbus_irq); \
604db650
JL
678 func(has_gmch_display); \
679 func(has_guc); \
604db650 680 func(has_hotplug); \
566c56a4
JL
681 func(has_hw_contexts); \
682 func(has_l3_dpf); \
604db650 683 func(has_llc); \
566c56a4
JL
684 func(has_logical_ring_contexts); \
685 func(has_overlay); \
686 func(has_pipe_cxsr); \
687 func(has_pooled_eu); \
688 func(has_psr); \
689 func(has_rc6); \
690 func(has_rc6p); \
691 func(has_resource_streamer); \
692 func(has_runtime_pm); \
604db650 693 func(has_snoop); \
566c56a4
JL
694 func(cursor_needs_physical); \
695 func(hws_needs_physical); \
696 func(overlay_needs_physical); \
697 func(supports_tv)
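/*
 * Note (sketch, not in the original header): DEV_INFO_FOR_EACH_FLAG() is an
 * X-macro; callers supply the per-flag expansion. struct intel_device_info
 * below uses it to declare one single-bit field per flag:
 *
 *	#define DEFINE_FLAG(name) u8 name:1
 *	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
 *	#undef DEFINE_FLAG
 */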
c96ea64e 698
915490d5 699struct sseu_dev_info {
f08a0c92 700 u8 slice_mask;
57ec171e 701 u8 subslice_mask;
915490d5
ID
702 u8 eu_total;
703 u8 eu_per_subslice;
43b67998
ID
704 u8 min_eu_in_pool;
705 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
706 u8 subslice_7eu[3];
707 u8 has_slice_pg:1;
708 u8 has_subslice_pg:1;
709 u8 has_eu_pg:1;
915490d5
ID
710};
711
57ec171e
ID
712static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
713{
714 return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
715}
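/*
 * Worked example (not in the original header): with slice_mask = 0x1 (one
 * slice) and subslice_mask = 0x7 (three subslices per slice),
 * sseu_subslice_total() returns hweight8(0x1) * hweight8(0x7) = 1 * 3 = 3.
 */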
716
cfdf1fa2 717struct intel_device_info {
10fce67a 718 u32 display_mmio_offset;
87f1f465 719 u16 device_id;
ac208a8b 720 u8 num_pipes;
d615a166 721 u8 num_sprites[I915_MAX_PIPES];
c96c3a8c 722 u8 gen;
ae5702d2 723 u16 gen_mask;
73ae478c 724 u8 ring_mask; /* Rings supported by the HW */
c1bb1145 725 u8 num_rings;
604db650
JL
726#define DEFINE_FLAG(name) u8 name:1
727 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
728#undef DEFINE_FLAG
6f3fff60 729 u16 ddb_size; /* in blocks */
a57c774a
AK
730 /* Register offsets for the various display pipes and transcoders */
731 int pipe_offsets[I915_MAX_TRANSCODERS];
732 int trans_offsets[I915_MAX_TRANSCODERS];
a57c774a 733 int palette_offsets[I915_MAX_PIPES];
5efb3e28 734 int cursor_offsets[I915_MAX_PIPES];
3873218f
JM
735
736 /* Slice/subslice/EU info */
43b67998 737 struct sseu_dev_info sseu;
82cf435b
LL
738
739 struct color_luts {
740 u16 degamma_lut_size;
741 u16 gamma_lut_size;
742 } color;
cfdf1fa2
KH
743};
744
2bd160a1
CW
745struct intel_display_error_state;
746
747struct drm_i915_error_state {
748 struct kref ref;
749 struct timeval time;
de867c20
CW
750 struct timeval boottime;
751 struct timeval uptime;
2bd160a1 752
9f267eb8
CW
753 struct drm_i915_private *i915;
754
2bd160a1
CW
755 char error_msg[128];
756 bool simulated;
757 int iommu;
758 u32 reset_count;
759 u32 suspend_count;
760 struct intel_device_info device_info;
761
762 /* Generic register state */
763 u32 eir;
764 u32 pgtbl_er;
765 u32 ier;
766 u32 gtier[4];
767 u32 ccid;
768 u32 derrmr;
769 u32 forcewake;
770 u32 error; /* gen6+ */
771 u32 err_int; /* gen7 */
772 u32 fault_data0; /* gen8, gen9 */
773 u32 fault_data1; /* gen8, gen9 */
774 u32 done_reg;
775 u32 gac_eco;
776 u32 gam_ecochk;
777 u32 gab_ctl;
778 u32 gfx_mode;
d636951e 779
2bd160a1
CW
780 u64 fence[I915_MAX_NUM_FENCES];
781 struct intel_overlay_error_state *overlay;
782 struct intel_display_error_state *display;
51d545d0 783 struct drm_i915_error_object *semaphore;
27b85bea 784 struct drm_i915_error_object *guc_log;
2bd160a1
CW
785
786 struct drm_i915_error_engine {
787 int engine_id;
788 /* Software tracked state */
789 bool waiting;
790 int num_waiters;
791 int hangcheck_score;
792 enum intel_engine_hangcheck_action hangcheck_action;
793 struct i915_address_space *vm;
794 int num_requests;
795
cdb324bd
CW
796 /* position of active request inside the ring */
797 u32 rq_head, rq_post, rq_tail;
798
2bd160a1
CW
799 /* our own tracking of ring head and tail */
800 u32 cpu_ring_head;
801 u32 cpu_ring_tail;
802
803 u32 last_seqno;
804 u32 semaphore_seqno[I915_NUM_ENGINES - 1];
805
806 /* Register state */
807 u32 start;
808 u32 tail;
809 u32 head;
810 u32 ctl;
21a2c58a 811 u32 mode;
2bd160a1
CW
812 u32 hws;
813 u32 ipeir;
814 u32 ipehr;
2bd160a1
CW
815 u32 bbstate;
816 u32 instpm;
817 u32 instps;
818 u32 seqno;
819 u64 bbaddr;
820 u64 acthd;
821 u32 fault_reg;
822 u64 faddr;
823 u32 rc_psmi; /* sleep state */
824 u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
d636951e 825 struct intel_instdone instdone;
2bd160a1
CW
826
827 struct drm_i915_error_object {
2bd160a1 828 u64 gtt_offset;
03382dfb 829 u64 gtt_size;
0a97015d
CW
830 int page_count;
831 int unused;
2bd160a1
CW
832 u32 *pages[0];
833 } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
834
835 struct drm_i915_error_object *wa_ctx;
836
837 struct drm_i915_error_request {
838 long jiffies;
c84455b4 839 pid_t pid;
35ca039e 840 u32 context;
2bd160a1
CW
841 u32 seqno;
842 u32 head;
843 u32 tail;
35ca039e 844 } *requests, execlist[2];
2bd160a1
CW
845
846 struct drm_i915_error_waiter {
847 char comm[TASK_COMM_LEN];
848 pid_t pid;
849 u32 seqno;
850 } *waiters;
851
852 struct {
853 u32 gfx_mode;
854 union {
855 u64 pdp[4];
856 u32 pp_dir_base;
857 };
858 } vm_info;
859
860 pid_t pid;
861 char comm[TASK_COMM_LEN];
862 } engine[I915_NUM_ENGINES];
863
864 struct drm_i915_error_buffer {
865 u32 size;
866 u32 name;
867 u32 rseqno[I915_NUM_ENGINES], wseqno;
868 u64 gtt_offset;
869 u32 read_domains;
870 u32 write_domain;
871 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
872 u32 tiling:2;
873 u32 dirty:1;
874 u32 purgeable:1;
875 u32 userptr:1;
876 s32 engine:4;
877 u32 cache_level:3;
878 } *active_bo[I915_NUM_ENGINES], *pinned_bo;
879 u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
880 struct i915_address_space *active_vm[I915_NUM_ENGINES];
881};
882
7faf1ab2
DV
883enum i915_cache_level {
884 I915_CACHE_NONE = 0,
350ec881
CW
885 I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
 886 I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
 887 caches, e.g. sampler/render caches, and the
888 large Last-Level-Cache. LLC is coherent with
889 the CPU, but L3 is only visible to the GPU. */
651d794f 890 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
7faf1ab2
DV
891};
892
e59ec13d
MK
893struct i915_ctx_hang_stats {
894 /* This context had batch pending when hang was declared */
895 unsigned batch_pending;
896
897 /* This context had batch active when hang was declared */
898 unsigned batch_active;
be62acb4
MK
899
900 /* Time when this context was last blamed for a GPU reset */
901 unsigned long guilty_ts;
902
676fa572
CW
 903 /* If the context causes a second GPU hang within this time,
904 * it is permanently banned from submitting any more work.
905 */
906 unsigned long ban_period_seconds;
907
be62acb4
MK
908 /* This context is banned to submit more work */
909 bool banned;
e59ec13d 910};
40521054
BW
911
912/* This must match up with the value previously used for execbuf2.rsvd1. */
821d66dd 913#define DEFAULT_CONTEXT_HANDLE 0
b1b38278 914
31b7a88d 915/**
e2efd130 916 * struct i915_gem_context - as the name implies, represents a context.
31b7a88d
OM
917 * @ref: reference count.
918 * @user_handle: userspace tracking identity for this context.
919 * @remap_slice: l3 row remapping information.
b1b38278
DW
920 * @flags: context specific flags:
921 * CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
31b7a88d
OM
922 * @file_priv: filp associated with this context (NULL for global default
923 * context).
924 * @hang_stats: information about the role of this context in possible GPU
925 * hangs.
7df113e4 926 * @ppgtt: virtual memory space used by this context.
31b7a88d
OM
927 * @legacy_hw_ctx: render context backing object and whether it is correctly
928 * initialized (legacy ring submission mechanism only).
929 * @link: link in the global list of contexts.
930 *
931 * Contexts are memory images used by the hardware to store copies of their
932 * internal state.
933 */
e2efd130 934struct i915_gem_context {
dce3271b 935 struct kref ref;
9ea4feec 936 struct drm_i915_private *i915;
40521054 937 struct drm_i915_file_private *file_priv;
ae6c4806 938 struct i915_hw_ppgtt *ppgtt;
c84455b4 939 struct pid *pid;
a33afea5 940
8d59bc6a
CW
941 struct i915_ctx_hang_stats hang_stats;
942
8d59bc6a 943 unsigned long flags;
bc3d6744
CW
944#define CONTEXT_NO_ZEROMAP BIT(0)
945#define CONTEXT_NO_ERROR_CAPTURE BIT(1)
0be81156
DG
946
947 /* Unique identifier for this context, used by the hw for tracking */
948 unsigned int hw_id;
8d59bc6a 949 u32 user_handle;
5d1808ec 950
0cb26a8e
CW
951 u32 ggtt_alignment;
952
9021ad03 953 struct intel_context {
bf3783e5 954 struct i915_vma *state;
7e37f889 955 struct intel_ring *ring;
82352e90 956 uint32_t *lrc_reg_state;
8d59bc6a
CW
957 u64 lrc_desc;
958 int pin_count;
24f1d3cc 959 bool initialised;
666796da 960 } engine[I915_NUM_ENGINES];
bcd794c2 961 u32 ring_size;
c01fc532 962 u32 desc_template;
3c7ba635 963 struct atomic_notifier_head status_notifier;
80a9a8db 964 bool execlists_force_single_submission;
c9e003af 965
a33afea5 966 struct list_head link;
8d59bc6a
CW
967
968 u8 remap_slice;
50e046b6 969 bool closed:1;
40521054
BW
970};
971
a4001f1b
PZ
972enum fb_op_origin {
973 ORIGIN_GTT,
974 ORIGIN_CPU,
975 ORIGIN_CS,
976 ORIGIN_FLIP,
74b4ea1e 977 ORIGIN_DIRTYFB,
a4001f1b
PZ
978};
979
ab34a7e8 980struct intel_fbc {
25ad93fd
PZ
981 /* This is always the inner lock when overlapping with struct_mutex and
982 * it's the outer lock when overlapping with stolen_lock. */
983 struct mutex lock;
5e59f717 984 unsigned threshold;
dbef0f15
PZ
985 unsigned int possible_framebuffer_bits;
986 unsigned int busy_bits;
010cf73d 987 unsigned int visible_pipes_mask;
e35fef21 988 struct intel_crtc *crtc;
5c3fe8b0 989
c4213885 990 struct drm_mm_node compressed_fb;
5c3fe8b0
BW
991 struct drm_mm_node *compressed_llb;
992
da46f936
RV
993 bool false_color;
994
d029bcad 995 bool enabled;
0e631adc 996 bool active;
9adccc60 997
61a585d6
PZ
998 bool underrun_detected;
999 struct work_struct underrun_work;
1000
aaf78d27
PZ
1001 struct intel_fbc_state_cache {
1002 struct {
1003 unsigned int mode_flags;
1004 uint32_t hsw_bdw_pixel_rate;
1005 } crtc;
1006
1007 struct {
1008 unsigned int rotation;
1009 int src_w;
1010 int src_h;
1011 bool visible;
1012 } plane;
1013
1014 struct {
1015 u64 ilk_ggtt_offset;
aaf78d27
PZ
1016 uint32_t pixel_format;
1017 unsigned int stride;
1018 int fence_reg;
1019 unsigned int tiling_mode;
1020 } fb;
1021 } state_cache;
1022
b183b3f1
PZ
1023 struct intel_fbc_reg_params {
1024 struct {
1025 enum pipe pipe;
1026 enum plane plane;
1027 unsigned int fence_y_offset;
1028 } crtc;
1029
1030 struct {
1031 u64 ggtt_offset;
b183b3f1
PZ
1032 uint32_t pixel_format;
1033 unsigned int stride;
1034 int fence_reg;
1035 } fb;
1036
1037 int cfb_size;
1038 } params;
1039
5c3fe8b0 1040 struct intel_fbc_work {
128d7356 1041 bool scheduled;
ca18d51d 1042 u32 scheduled_vblank;
128d7356 1043 struct work_struct work;
128d7356 1044 } work;
5c3fe8b0 1045
bf6189c6 1046 const char *no_fbc_reason;
b5e50c3f
JB
1047};
1048
96178eeb
VK
1049/**
1050 * HIGH_RR is the highest eDP panel refresh rate read from EDID
1051 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 1052 * parsing for the same resolution.
1053 */
1054enum drrs_refresh_rate_type {
1055 DRRS_HIGH_RR,
1056 DRRS_LOW_RR,
1057 DRRS_MAX_RR, /* RR count */
1058};
1059
1060enum drrs_support_type {
1061 DRRS_NOT_SUPPORTED = 0,
1062 STATIC_DRRS_SUPPORT = 1,
1063 SEAMLESS_DRRS_SUPPORT = 2
439d7ac0
PB
1064};
1065
2807cf69 1066struct intel_dp;
96178eeb
VK
1067struct i915_drrs {
1068 struct mutex mutex;
1069 struct delayed_work work;
1070 struct intel_dp *dp;
1071 unsigned busy_frontbuffer_bits;
1072 enum drrs_refresh_rate_type refresh_rate_type;
1073 enum drrs_support_type type;
1074};
1075
a031d709 1076struct i915_psr {
f0355c4a 1077 struct mutex lock;
a031d709
RV
1078 bool sink_support;
1079 bool source_ok;
2807cf69 1080 struct intel_dp *enabled;
7c8f8a70
RV
1081 bool active;
1082 struct delayed_work work;
9ca15301 1083 unsigned busy_frontbuffer_bits;
474d1ec4
SJ
1084 bool psr2_support;
1085 bool aux_frame_sync;
60e5ffe3 1086 bool link_standby;
3f51e471 1087};
5c3fe8b0 1088
3bad0781 1089enum intel_pch {
f0350830 1090 PCH_NONE = 0, /* No PCH present */
3bad0781
ZW
1091 PCH_IBX, /* Ibexpeak PCH */
1092 PCH_CPT, /* Cougarpoint PCH */
eb877ebf 1093 PCH_LPT, /* Lynxpoint PCH */
e7e7ea20 1094 PCH_SPT, /* Sunrisepoint PCH */
22dea0be 1095 PCH_KBP, /* Kabypoint PCH */
40c7ead9 1096 PCH_NOP,
3bad0781
ZW
1097};
1098
988d6ee8
PZ
1099enum intel_sbi_destination {
1100 SBI_ICLK,
1101 SBI_MPHY,
1102};
1103
b690e96c 1104#define QUIRK_PIPEA_FORCE (1<<0)
435793df 1105#define QUIRK_LVDS_SSC_DISABLE (1<<1)
4dca20ef 1106#define QUIRK_INVERT_BRIGHTNESS (1<<2)
9c72cc6f 1107#define QUIRK_BACKLIGHT_PRESENT (1<<3)
b6b5d049 1108#define QUIRK_PIPEB_FORCE (1<<4)
656bfa3a 1109#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
b690e96c 1110
8be48d92 1111struct intel_fbdev;
1630fe75 1112struct intel_fbc_work;
38651674 1113
c2b9152f
DV
1114struct intel_gmbus {
1115 struct i2c_adapter adapter;
3e4d44e0 1116#define GMBUS_FORCE_BIT_RETRY (1U << 31)
f2ce9faf 1117 u32 force_bit;
c2b9152f 1118 u32 reg0;
f0f59a00 1119 i915_reg_t gpio_reg;
c167a6fc 1120 struct i2c_algo_bit_data bit_algo;
c2b9152f
DV
1121 struct drm_i915_private *dev_priv;
1122};
1123
f4c956ad 1124struct i915_suspend_saved_registers {
e948e994 1125 u32 saveDSPARB;
ba8bbcf6 1126 u32 saveFBC_CONTROL;
1f84e550 1127 u32 saveCACHE_MODE_0;
1f84e550 1128 u32 saveMI_ARB_STATE;
ba8bbcf6
JB
1129 u32 saveSWF0[16];
1130 u32 saveSWF1[16];
85fa792b 1131 u32 saveSWF3[3];
4b9de737 1132 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
cda2bb78 1133 u32 savePCH_PORT_HOTPLUG;
9f49c376 1134 u16 saveGCDGMBUS;
f4c956ad 1135};
c85aa885 1136
ddeea5b0
ID
1137struct vlv_s0ix_state {
1138 /* GAM */
1139 u32 wr_watermark;
1140 u32 gfx_prio_ctrl;
1141 u32 arb_mode;
1142 u32 gfx_pend_tlb0;
1143 u32 gfx_pend_tlb1;
1144 u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
1145 u32 media_max_req_count;
1146 u32 gfx_max_req_count;
1147 u32 render_hwsp;
1148 u32 ecochk;
1149 u32 bsd_hwsp;
1150 u32 blt_hwsp;
1151 u32 tlb_rd_addr;
1152
1153 /* MBC */
1154 u32 g3dctl;
1155 u32 gsckgctl;
1156 u32 mbctl;
1157
1158 /* GCP */
1159 u32 ucgctl1;
1160 u32 ucgctl3;
1161 u32 rcgctl1;
1162 u32 rcgctl2;
1163 u32 rstctl;
1164 u32 misccpctl;
1165
1166 /* GPM */
1167 u32 gfxpause;
1168 u32 rpdeuhwtc;
1169 u32 rpdeuc;
1170 u32 ecobus;
1171 u32 pwrdwnupctl;
1172 u32 rp_down_timeout;
1173 u32 rp_deucsw;
1174 u32 rcubmabdtmr;
1175 u32 rcedata;
1176 u32 spare2gh;
1177
1178 /* Display 1 CZ domain */
1179 u32 gt_imr;
1180 u32 gt_ier;
1181 u32 pm_imr;
1182 u32 pm_ier;
1183 u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
1184
1185 /* GT SA CZ domain */
1186 u32 tilectl;
1187 u32 gt_fifoctl;
1188 u32 gtlc_wake_ctrl;
1189 u32 gtlc_survive;
1190 u32 pmwgicz;
1191
1192 /* Display 2 CZ domain */
1193 u32 gu_ctl0;
1194 u32 gu_ctl1;
9c25210f 1195 u32 pcbr;
ddeea5b0
ID
1196 u32 clock_gate_dis2;
1197};
1198
bf225f20
CW
1199struct intel_rps_ei {
1200 u32 cz_clock;
1201 u32 render_c0;
1202 u32 media_c0;
31685c25
D
1203};
1204
c85aa885 1205struct intel_gen6_power_mgmt {
d4d70aa5
ID
1206 /*
1207 * work, interrupts_enabled and pm_iir are protected by
1208 * dev_priv->irq_lock
1209 */
c85aa885 1210 struct work_struct work;
d4d70aa5 1211 bool interrupts_enabled;
c85aa885 1212 u32 pm_iir;
59cdb63d 1213
b20e3cfe 1214 /* PM interrupt bits that should never be masked */
1800ad25
SAK
1215 u32 pm_intr_keep;
1216
b39fb297
BW
1217 /* Frequencies are stored in potentially platform dependent multiples.
1218 * In other words, *_freq needs to be multiplied by X to be interesting.
1219 * Soft limits are those which are used for the dynamic reclocking done
1220 * by the driver (raise frequencies under heavy loads, and lower for
1221 * lighter loads). Hard limits are those imposed by the hardware.
1222 *
1223 * A distinction is made for overclocking, which is never enabled by
1224 * default, and is considered to be above the hard limit if it's
1225 * possible at all.
1226 */
1227 u8 cur_freq; /* Current frequency (cached, may not == HW) */
1228 u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
1229 u8 max_freq_softlimit; /* Max frequency permitted by the driver */
1230 u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
1231 u8 min_freq; /* AKA RPn. Minimum frequency */
29ecd78d 1232 u8 boost_freq; /* Frequency to request when wait boosting */
aed242ff 1233 u8 idle_freq; /* Frequency to request when we are idle */
b39fb297
BW
1234 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
 1235 u8 rp1_freq; /* "less than" RP0 power/frequency */
1236 u8 rp0_freq; /* Non-overclocked max frequency. */
c30fec65 1237 u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
1a01ab3b 1238
8fb55197
CW
 1239 u8 up_threshold; /* Current %busy required to upclock */
1240 u8 down_threshold; /* Current %busy required to downclock */
1241
dd75fdc8
CW
1242 int last_adj;
1243 enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
1244
8d3afd7d
CW
1245 spinlock_t client_lock;
1246 struct list_head clients;
1247 bool client_boost;
1248
c0951f0c 1249 bool enabled;
54b4f68f 1250 struct delayed_work autoenable_work;
1854d5ca 1251 unsigned boosts;
4fc688ce 1252
bf225f20
CW
1253 /* manual wa residency calculations */
1254 struct intel_rps_ei up_ei, down_ei;
1255
4fc688ce
JB
1256 /*
1257 * Protects RPS/RC6 register access and PCU communication.
8d3afd7d
CW
1258 * Must be taken after struct_mutex if nested. Note that
1259 * this lock may be held for long periods of time when
1260 * talking to hw - so only take it when talking to hw!
4fc688ce
JB
1261 */
1262 struct mutex hw_lock;
c85aa885
DV
1263};
1264
1a240d4d
DV
1265/* defined intel_pm.c */
1266extern spinlock_t mchdev_lock;
1267
c85aa885
DV
1268struct intel_ilk_power_mgmt {
1269 u8 cur_delay;
1270 u8 min_delay;
1271 u8 max_delay;
1272 u8 fmax;
1273 u8 fstart;
1274
1275 u64 last_count1;
1276 unsigned long last_time1;
1277 unsigned long chipset_power;
1278 u64 last_count2;
5ed0bdf2 1279 u64 last_time2;
c85aa885
DV
1280 unsigned long gfx_power;
1281 u8 corr;
1282
1283 int c_m;
1284 int r_t;
1285};
1286
c6cb582e
ID
1287struct drm_i915_private;
1288struct i915_power_well;
1289
1290struct i915_power_well_ops {
1291 /*
1292 * Synchronize the well's hw state to match the current sw state, for
1293 * example enable/disable it based on the current refcount. Called
1294 * during driver init and resume time, possibly after first calling
1295 * the enable/disable handlers.
1296 */
1297 void (*sync_hw)(struct drm_i915_private *dev_priv,
1298 struct i915_power_well *power_well);
1299 /*
1300 * Enable the well and resources that depend on it (for example
1301 * interrupts located on the well). Called after the 0->1 refcount
1302 * transition.
1303 */
1304 void (*enable)(struct drm_i915_private *dev_priv,
1305 struct i915_power_well *power_well);
1306 /*
1307 * Disable the well and resources that depend on it. Called after
1308 * the 1->0 refcount transition.
1309 */
1310 void (*disable)(struct drm_i915_private *dev_priv,
1311 struct i915_power_well *power_well);
1312 /* Returns the hw enabled state. */
1313 bool (*is_enabled)(struct drm_i915_private *dev_priv,
1314 struct i915_power_well *power_well);
1315};
1316
a38911a3
WX
1317/* Power well structure for haswell */
1318struct i915_power_well {
c1ca727f 1319 const char *name;
6f3ef5dd 1320 bool always_on;
a38911a3
WX
1321 /* power well enable/disable usage count */
1322 int count;
bfafe93a
ID
1323 /* cached hw enabled state */
1324 bool hw_enabled;
c1ca727f 1325 unsigned long domains;
01c3faa7
ACO
1326 /* unique identifier for this power well */
1327 unsigned long id;
362624c9
ACO
1328 /*
 1329 * Arbitrary data associated with this power well. Platform and power
1330 * well specific.
1331 */
1332 unsigned long data;
c6cb582e 1333 const struct i915_power_well_ops *ops;
a38911a3
WX
1334};
1335
83c00f55 1336struct i915_power_domains {
baa70707
ID
1337 /*
1338 * Power wells needed for initialization at driver init and suspend
1339 * time are on. They are kept on until after the first modeset.
1340 */
1341 bool init_power_on;
0d116a29 1342 bool initializing;
c1ca727f 1343 int power_well_count;
baa70707 1344
83c00f55 1345 struct mutex lock;
1da51581 1346 int domain_use_count[POWER_DOMAIN_NUM];
c1ca727f 1347 struct i915_power_well *power_wells;
83c00f55
ID
1348};
1349
35a85ac6 1350#define MAX_L3_SLICES 2
a4da4fa4 1351struct intel_l3_parity {
35a85ac6 1352 u32 *remap_info[MAX_L3_SLICES];
a4da4fa4 1353 struct work_struct error_work;
35a85ac6 1354 int which_slice;
a4da4fa4
DV
1355};
1356
4b5aed62 1357struct i915_gem_mm {
4b5aed62
DV
1358 /** Memory allocator for GTT stolen memory */
1359 struct drm_mm stolen;
92e97d2f
PZ
1360 /** Protects the usage of the GTT stolen memory allocator. This is
1361 * always the inner lock when overlapping with struct_mutex. */
1362 struct mutex stolen_lock;
1363
4b5aed62
DV
1364 /** List of all objects in gtt_space. Used to restore gtt
1365 * mappings on resume */
1366 struct list_head bound_list;
1367 /**
1368 * List of objects which are not bound to the GTT (thus
fbbd37b3
CW
1369 * are idle and not used by the GPU). These objects may or may
1370 * not actually have any pages attached.
4b5aed62
DV
1371 */
1372 struct list_head unbound_list;
1373
275f039d
CW
1374 /** List of all objects in gtt_space, currently mmaped by userspace.
1375 * All objects within this list must also be on bound_list.
1376 */
1377 struct list_head userfault_list;
1378
fbbd37b3
CW
1379 /**
1380 * List of objects which are pending destruction.
1381 */
1382 struct llist_head free_list;
1383 struct work_struct free_work;
1384
4b5aed62
DV
1385 /** Usable portion of the GTT for GEM */
1386 unsigned long stolen_base; /* limited to low memory (32-bit) */
1387
4b5aed62
DV
1388 /** PPGTT used for aliasing the PPGTT with the GTT */
1389 struct i915_hw_ppgtt *aliasing_ppgtt;
1390
2cfcd32a 1391 struct notifier_block oom_notifier;
e87666b5 1392 struct notifier_block vmap_notifier;
ceabbba5 1393 struct shrinker shrinker;
4b5aed62 1394
4b5aed62
DV
1395 /** LRU list of objects with fence regs on them. */
1396 struct list_head fence_list;
1397
4b5aed62
DV
1398 /**
1399 * Are we in a non-interruptible section of code like
1400 * modesetting?
1401 */
1402 bool interruptible;
1403
bdf1e7e3 1404 /* the indicator for dispatch video commands on two BSD rings */
6f633402 1405 atomic_t bsd_engine_dispatch_index;
bdf1e7e3 1406
4b5aed62
DV
1407 /** Bit 6 swizzling required for X tiling */
1408 uint32_t bit_6_swizzle_x;
1409 /** Bit 6 swizzling required for Y tiling */
1410 uint32_t bit_6_swizzle_y;
1411
4b5aed62 1412 /* accounting, useful for userland debugging */
c20e8355 1413 spinlock_t object_stat_lock;
3ef7f228 1414 u64 object_memory;
4b5aed62
DV
1415 u32 object_count;
1416};
1417
edc3d884 1418struct drm_i915_error_state_buf {
0a4cd7c8 1419 struct drm_i915_private *i915;
edc3d884
MK
1420 unsigned bytes;
1421 unsigned size;
1422 int err;
1423 u8 *buf;
1424 loff_t start;
1425 loff_t pos;
1426};
1427
fc16b48b
MK
1428struct i915_error_state_file_priv {
1429 struct drm_device *dev;
1430 struct drm_i915_error_state *error;
1431};
1432
b52992c0
CW
1433#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
1434#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
1435
99584db3
DV
1436struct i915_gpu_error {
1437 /* For hangcheck timer */
1438#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
1439#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
be62acb4
MK
1440 /* Hang gpu twice in this window and your context gets banned */
1441#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
1442
737b1506 1443 struct delayed_work hangcheck_work;
99584db3
DV
1444
1445 /* For reset and error_state handling. */
1446 spinlock_t lock;
1447 /* Protected by the above dev->gpu_error.lock. */
1448 struct drm_i915_error_state *first_error;
094f9a54
CW
1449
1450 unsigned long missed_irq_rings;
1451
1f83fee0 1452 /**
2ac0f450 1453 * State variable controlling the reset flow and count
1f83fee0 1454 *
2ac0f450 1455 * This is a counter which gets incremented when reset is triggered,
8af29b0c
CW
1456 *
1457 * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set
1458 * meaning that any waiters holding onto the struct_mutex should
1459 * relinquish the lock immediately in order for the reset to start.
2ac0f450
MK
1460 *
 1461 * If reset is not completed successfully, the I915_WEDGED bit is
1462 * set meaning that hardware is terminally sour and there is no
1463 * recovery. All waiters on the reset_queue will be woken when
1464 * that happens.
1465 *
 1466 * This counter is used by the wait_seqno code to notice that a reset
1467 * event happened and it needs to restart the entire ioctl (since most
1468 * likely the seqno it waited for won't ever signal anytime soon).
f69061be
DV
1469 *
1470 * This is important for lock-free wait paths, where no contended lock
1471 * naturally enforces the correct ordering between the bail-out of the
1472 * waiter and the gpu reset work code.
1f83fee0 1473 */
8af29b0c 1474 unsigned long reset_count;
1f83fee0 1475
8af29b0c
CW
1476 unsigned long flags;
1477#define I915_RESET_IN_PROGRESS 0
1478#define I915_WEDGED (BITS_PER_LONG - 1)
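/*
 * Sketch (assumption, not from this header): callers typically test these
 * flag bits with the generic bitops, e.g.
 *
 *	if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
 *		return -EAGAIN;
 *	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
 *		return -EIO;
 */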
1f83fee0 1479
1f15b76f
CW
1480 /**
 1481 * Waitqueue to signal when a hang is detected. Used for waiters
 1482 * to release the struct_mutex for the reset to proceed.
1483 */
1484 wait_queue_head_t wait_queue;
1485
1f83fee0
DV
1486 /**
1487 * Waitqueue to signal when the reset has completed. Used by clients
1488 * that wait for dev_priv->mm.wedged to settle.
1489 */
1490 wait_queue_head_t reset_queue;
33196ded 1491
094f9a54 1492 /* For missed irq/seqno simulation. */
688e6c72 1493 unsigned long test_irq_rings;
99584db3
DV
1494};
1495
b8efb17b
ZR
1496enum modeset_restore {
1497 MODESET_ON_LID_OPEN,
1498 MODESET_DONE,
1499 MODESET_SUSPENDED,
1500};
1501
500ea70d
RV
1502#define DP_AUX_A 0x40
1503#define DP_AUX_B 0x10
1504#define DP_AUX_C 0x20
1505#define DP_AUX_D 0x30
1506
11c1b657
XZ
1507#define DDC_PIN_B 0x05
1508#define DDC_PIN_C 0x04
1509#define DDC_PIN_D 0x06
1510
6acab15a 1511struct ddi_vbt_port_info {
ce4dd49e
DL
1512 /*
1513 * This is an index in the HDMI/DVI DDI buffer translation table.
1514 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
1515 * populate this field.
1516 */
1517#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
6acab15a 1518 uint8_t hdmi_level_shift;
311a2094
PZ
1519
1520 uint8_t supports_dvi:1;
1521 uint8_t supports_hdmi:1;
1522 uint8_t supports_dp:1;
500ea70d
RV
1523
1524 uint8_t alternate_aux_channel;
11c1b657 1525 uint8_t alternate_ddc_pin;
75067dde
AK
1526
1527 uint8_t dp_boost_level;
1528 uint8_t hdmi_boost_level;
6acab15a
PZ
1529};
1530
bfd7ebda
RV
1531enum psr_lines_to_wait {
1532 PSR_0_LINES_TO_WAIT = 0,
1533 PSR_1_LINE_TO_WAIT,
1534 PSR_4_LINES_TO_WAIT,
1535 PSR_8_LINES_TO_WAIT
83a7280e
PB
1536};
1537
41aa3448
RV
1538struct intel_vbt_data {
1539 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1540 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1541
1542 /* Feature bits */
1543 unsigned int int_tv_support:1;
1544 unsigned int lvds_dither:1;
1545 unsigned int lvds_vbt:1;
1546 unsigned int int_crt_support:1;
1547 unsigned int lvds_use_ssc:1;
1548 unsigned int display_clock_mode:1;
1549 unsigned int fdi_rx_polarity_inverted:1;
3e845c7a 1550 unsigned int panel_type:4;
41aa3448
RV
1551 int lvds_ssc_freq;
1552 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1553
83a7280e
PB
1554 enum drrs_support_type drrs_type;
1555
6aa23e65
JN
1556 struct {
1557 int rate;
1558 int lanes;
1559 int preemphasis;
1560 int vswing;
06411f08 1561 bool low_vswing;
6aa23e65
JN
1562 bool initialized;
1563 bool support;
1564 int bpp;
1565 struct edp_power_seq pps;
1566 } edp;
41aa3448 1567
bfd7ebda
RV
1568 struct {
1569 bool full_link;
1570 bool require_aux_wakeup;
1571 int idle_frames;
1572 enum psr_lines_to_wait lines_to_wait;
1573 int tp1_wakeup_time;
1574 int tp2_tp3_wakeup_time;
1575 } psr;
1576
f00076d2
JN
1577 struct {
1578 u16 pwm_freq_hz;
39fbc9c8 1579 bool present;
f00076d2 1580 bool active_low_pwm;
1de6068e 1581 u8 min_brightness; /* min_brightness/255 of max */
9a41e17d 1582 enum intel_backlight_type type;
f00076d2
JN
1583 } backlight;
1584
d17c5443
SK
1585 /* MIPI DSI */
1586 struct {
1587 u16 panel_id;
d3b542fc
SK
1588 struct mipi_config *config;
1589 struct mipi_pps_data *pps;
1590 u8 seq_version;
1591 u32 size;
1592 u8 *data;
8d3ed2f3 1593 const u8 *sequence[MIPI_SEQ_MAX];
d17c5443
SK
1594 } dsi;
1595
41aa3448
RV
1596 int crt_ddc_pin;
1597
1598 int child_dev_num;
768f69c9 1599 union child_device_config *child_dev;
6acab15a
PZ
1600
1601 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
9d6c875d 1602 struct sdvo_device_mapping sdvo_mappings[2];
41aa3448
RV
1603};
1604
77c122bc
VS
1605enum intel_ddb_partitioning {
1606 INTEL_DDB_PART_1_2,
1607 INTEL_DDB_PART_5_6, /* IVB+ */
1608};
1609
1fd527cc
VS
1610struct intel_wm_level {
1611 bool enable;
1612 uint32_t pri_val;
1613 uint32_t spr_val;
1614 uint32_t cur_val;
1615 uint32_t fbc_val;
1616};
1617
820c1980 1618struct ilk_wm_values {
609cedef
VS
1619 uint32_t wm_pipe[3];
1620 uint32_t wm_lp[3];
1621 uint32_t wm_lp_spr[3];
1622 uint32_t wm_linetime[3];
1623 bool enable_fbc_wm;
1624 enum intel_ddb_partitioning partitioning;
1625};
1626
262cd2e1
VS
1627struct vlv_pipe_wm {
1628 uint16_t primary;
1629 uint16_t sprite[2];
1630 uint8_t cursor;
1631};
ae80152d 1632
262cd2e1
VS
1633struct vlv_sr_wm {
1634 uint16_t plane;
1635 uint8_t cursor;
1636};
ae80152d 1637
262cd2e1
VS
1638struct vlv_wm_values {
1639 struct vlv_pipe_wm pipe[3];
1640 struct vlv_sr_wm sr;
0018fda1
VS
1641 struct {
1642 uint8_t cursor;
1643 uint8_t sprite[2];
1644 uint8_t primary;
1645 } ddl[3];
6eb1a681
VS
1646 uint8_t level;
1647 bool cxsr;
0018fda1
VS
1648};
1649
c193924e 1650struct skl_ddb_entry {
16160e3d 1651 uint16_t start, end; /* in number of blocks, 'end' is exclusive */
c193924e
DL
1652};
1653
1654static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
1655{
16160e3d 1656 return entry->end - entry->start;
c193924e
DL
1657}
1658
08db6652
DL
1659static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
1660 const struct skl_ddb_entry *e2)
1661{
1662 if (e1->start == e2->start && e1->end == e2->end)
1663 return true;
1664
1665 return false;
1666}
1667
c193924e 1668struct skl_ddb_allocation {
2cd601c6 1669 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
4969d33e 1670 struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
c193924e
DL
1671};
1672
2ac96d2a 1673struct skl_wm_values {
2b4b9f35 1674 unsigned dirty_pipes;
c193924e 1675 struct skl_ddb_allocation ddb;
2ac96d2a
PB
1676};
1677
1678struct skl_wm_level {
a62163e9
L
1679 bool plane_en;
1680 uint16_t plane_res_b;
1681 uint8_t plane_res_l;
2ac96d2a
PB
1682};
1683
c67a470b 1684/*
765dab67
PZ
 1685 * This struct helps track the state needed for runtime PM, which puts the
1686 * device in PCI D3 state. Notice that when this happens, nothing on the
1687 * graphics device works, even register access, so we don't get interrupts nor
1688 * anything else.
c67a470b 1689 *
765dab67
PZ
1690 * Every piece of our code that needs to actually touch the hardware needs to
1691 * either call intel_runtime_pm_get or call intel_display_power_get with the
1692 * appropriate power domain.
a8a8bd54 1693 *
765dab67
PZ
1694 * Our driver uses the autosuspend delay feature, which means we'll only really
1695 * suspend if we stay with zero refcount for a certain amount of time. The
f458ebbc 1696 * default value is currently very conservative (see intel_runtime_pm_enable), but
765dab67 1697 * it can be changed with the standard runtime PM files from sysfs.
c67a470b
PZ
1698 *
 1699 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 1700 * and goes back to true exactly before we reenable the IRQs. We use this variable
1701 * to check if someone is trying to enable/disable IRQs while they're supposed
1702 * to be disabled. This shouldn't happen and we'll print some error messages in
730488b2 1703 * case it happens.
c67a470b 1704 *
765dab67 1705 * For more, read the Documentation/power/runtime_pm.txt.
c67a470b 1706 */
5d584b2e 1707struct i915_runtime_pm {
1f814dac 1708 atomic_t wakeref_count;
5d584b2e 1709 bool suspended;
2aeb7d3a 1710 bool irqs_enabled;
c67a470b
PZ
1711};
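/*
 * Usage sketch (assumption, not from this header): hardware-touching code
 * brackets its register access with a runtime PM or display power reference,
 * e.g.
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... poke registers ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * or intel_display_power_get()/intel_display_power_put() with the relevant
 * enum intel_display_power_domain.
 */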
1712
926321d5
DV
1713enum intel_pipe_crc_source {
1714 INTEL_PIPE_CRC_SOURCE_NONE,
1715 INTEL_PIPE_CRC_SOURCE_PLANE1,
1716 INTEL_PIPE_CRC_SOURCE_PLANE2,
1717 INTEL_PIPE_CRC_SOURCE_PF,
5b3a856b 1718 INTEL_PIPE_CRC_SOURCE_PIPE,
3d099a05
DV
1719 /* TV/DP on pre-gen5/vlv can't use the pipe source. */
1720 INTEL_PIPE_CRC_SOURCE_TV,
1721 INTEL_PIPE_CRC_SOURCE_DP_B,
1722 INTEL_PIPE_CRC_SOURCE_DP_C,
1723 INTEL_PIPE_CRC_SOURCE_DP_D,
46a19188 1724 INTEL_PIPE_CRC_SOURCE_AUTO,
926321d5
DV
1725 INTEL_PIPE_CRC_SOURCE_MAX,
1726};
1727
8bf1e9f1 1728struct intel_pipe_crc_entry {
ac2300d4 1729 uint32_t frame;
8bf1e9f1
SH
1730 uint32_t crc[5];
1731};
1732
b2c88f5b 1733#define INTEL_PIPE_CRC_ENTRIES_NR 128
8bf1e9f1 1734struct intel_pipe_crc {
d538bbdf
DL
1735 spinlock_t lock;
1736 bool opened; /* exclusive access to the result file */
e5f75aca 1737 struct intel_pipe_crc_entry *entries;
926321d5 1738 enum intel_pipe_crc_source source;
d538bbdf 1739 int head, tail;
07144428 1740 wait_queue_head_t wq;
8bf1e9f1
SH
1741};
1742
f99d7069 1743struct i915_frontbuffer_tracking {
b5add959 1744 spinlock_t lock;
f99d7069
DV
1745
1746 /*
 1747 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
1748 * scheduled flips.
1749 */
1750 unsigned busy_bits;
1751 unsigned flip_bits;
1752};
1753
7225342a 1754struct i915_wa_reg {
f0f59a00 1755 i915_reg_t addr;
7225342a
MK
1756 u32 value;
1757 /* bitmask representing WA bits */
1758 u32 mask;
1759};
1760
33136b06
AS
1761/*
1762 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
1763 * allowing it for RCS as we don't foresee any requirement of having
1764 * a whitelist for other engines. When it is really required for
 1765 * other engines, the limit needs to be increased.
1766 */
1767#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
7225342a
MK
1768
1769struct i915_workarounds {
1770 struct i915_wa_reg reg[I915_MAX_WA_REGS];
1771 u32 count;
666796da 1772 u32 hw_whitelist_count[I915_NUM_ENGINES];
7225342a
MK
1773};
1774
cf9d2890
YZ
1775struct i915_virtual_gpu {
1776 bool active;
1777};
1778
aa363136
MR
1779/* used in computing the new watermarks state */
1780struct intel_wm_config {
1781 unsigned int num_pipes_active;
1782 bool sprites_enabled;
1783 bool sprites_scaled;
1784};
1785
77fec556 1786struct drm_i915_private {
8f460e2c
CW
1787 struct drm_device drm;
1788
efab6d8d 1789 struct kmem_cache *objects;
e20d2ab7 1790 struct kmem_cache *vmas;
efab6d8d 1791 struct kmem_cache *requests;
f4c956ad 1792
5c969aa7 1793 const struct intel_device_info info;
f4c956ad
DV
1794
1795 int relative_constants_mode;
1796
1797 void __iomem *regs;
1798
907b28c5 1799 struct intel_uncore uncore;
f4c956ad 1800
cf9d2890
YZ
1801 struct i915_virtual_gpu vgpu;
1802
feddf6e8 1803 struct intel_gvt *gvt;
0ad35fed 1804
33a732f4
AD
1805 struct intel_guc guc;
1806
eb805623
DV
1807 struct intel_csr csr;
1808
5ea6e5e3 1809 struct intel_gmbus gmbus[GMBUS_NUM_PINS];
28c70f16 1810
f4c956ad
DV
1811 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
1812 * controller on different i2c buses. */
1813 struct mutex gmbus_mutex;
1814
1815 /**
1816 * Base address of the gmbus and gpio block.
1817 */
1818 uint32_t gpio_mmio_base;
1819
b6fdd0f2
SS
1820 /* MMIO base address for MIPI regs */
1821 uint32_t mipi_mmio_base;
1822
443a389f
VS
1823 uint32_t psr_mmio_base;
1824
44cb734c
ID
1825 uint32_t pps_mmio_base;
1826
28c70f16
DV
1827 wait_queue_head_t gmbus_wait_queue;
1828
f4c956ad 1829 struct pci_dev *bridge_dev;
0ca5fa3a 1830 struct i915_gem_context *kernel_context;
3b3f1650 1831 struct intel_engine_cs *engine[I915_NUM_ENGINES];
51d545d0 1832 struct i915_vma *semaphore;
ddf07be7 1833 u32 next_seqno;
f4c956ad 1834
ba8286fa 1835 struct drm_dma_handle *status_page_dmah;
f4c956ad
DV
1836 struct resource mch_res;
1837
f4c956ad
DV
1838 /* protects the irq masks */
1839 spinlock_t irq_lock;
1840
84c33a64
SG
1841 /* protects the mmio flip data */
1842 spinlock_t mmio_flip_lock;
1843
f8b79e58
ID
1844 bool display_irqs_enabled;
1845
9ee32fea
DV
1846 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1847 struct pm_qos_request pm_qos;
1848
a580516d
VS
1849 /* Sideband mailbox protection */
1850 struct mutex sb_lock;
f4c956ad
DV
1851
1852 /** Cached value of IMR to avoid reads in updating the bitfield */
abd58f01
BW
1853 union {
1854 u32 irq_mask;
1855 u32 de_irq_mask[I915_MAX_PIPES];
1856 };
f4c956ad 1857 u32 gt_irq_mask;
f4e9af4f
AG
1858 u32 pm_imr;
1859 u32 pm_ier;
a6706b45 1860 u32 pm_rps_events;
26705e20 1861 u32 pm_guc_events;
91d181dd 1862 u32 pipestat_irq_mask[I915_MAX_PIPES];
f4c956ad 1863
5fcece80 1864 struct i915_hotplug hotplug;
ab34a7e8 1865 struct intel_fbc fbc;
439d7ac0 1866 struct i915_drrs drrs;
f4c956ad 1867 struct intel_opregion opregion;
41aa3448 1868 struct intel_vbt_data vbt;
f4c956ad 1869
d9ceb816
JB
1870 bool preserve_bios_swizzle;
1871
f4c956ad
DV
1872 /* overlay */
1873 struct intel_overlay *overlay;
f4c956ad 1874
58c68779 1875 /* backlight registers and fields in struct intel_panel */
07f11d49 1876 struct mutex backlight_lock;
31ad8ec6 1877
f4c956ad 1878 /* LVDS info */
f4c956ad
DV
1879 bool no_aux_handshake;
1880
e39b999a
VS
1881 /* protects panel power sequencer state */
1882 struct mutex pps_mutex;
1883
f4c956ad 1884 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
f4c956ad
DV
1885 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1886
1887 unsigned int fsb_freq, mem_freq, is_ddr3;
b2045352 1888 unsigned int skl_preferred_vco_freq;
1a617b77 1889 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
adafdc6f 1890 unsigned int max_dotclk_freq;
e7dc33f3 1891 unsigned int rawclk_freq;
6bcda4f0 1892 unsigned int hpll_freq;
bfa7df01 1893 unsigned int czclk_freq;
f4c956ad 1894
63911d72 1895 struct {
709e05c3 1896 unsigned int vco, ref;
63911d72
VS
1897 } cdclk_pll;
1898
645416f5
DV
1899 /**
1900 * wq - Driver workqueue for GEM.
1901 *
1902 * NOTE: Work items scheduled here are not allowed to grab any modeset
1903 * locks, for otherwise the flushing done in the pageflip code will
1904 * result in deadlocks.
1905 */
f4c956ad
DV
1906 struct workqueue_struct *wq;
1907
1908 /* Display functions */
1909 struct drm_i915_display_funcs display;
1910
1911 /* PCH chipset type */
1912 enum intel_pch pch_type;
17a303ec 1913 unsigned short pch_id;
f4c956ad
DV
1914
1915 unsigned long quirks;
1916
b8efb17b
ZR
1917 enum modeset_restore modeset_restore;
1918 struct mutex modeset_restore_lock;
e2c8b870 1919 struct drm_atomic_state *modeset_restore_state;
73974893 1920 struct drm_modeset_acquire_ctx reset_ctx;
673a394b 1921
a7bbbd63 1922 struct list_head vm_list; /* Global list of all address spaces */
62106b4f 1923 struct i915_ggtt ggtt; /* VM representing the global address space */
5d4545ae 1924
4b5aed62 1925 struct i915_gem_mm mm;
ad46cb53
CW
1926 DECLARE_HASHTABLE(mm_structs, 7);
1927 struct mutex mm_lock;
8781342d 1928
5d1808ec
CW
1929 /* The hw wants to have a stable context identifier for the lifetime
1930 * of the context (for OA, PASID, faults, etc). This is limited
1931 * in execlists to 21 bits.
1932 */
1933 struct ida context_hw_ida;
1934#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
1935
8781342d
DV
1936 /* Kernel Modesetting */
1937
76c4ac04
DL
1938 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1939 struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
6b95a207
KH
1940 wait_queue_head_t pending_flip_queue;
1941
c4597872
DV
1942#ifdef CONFIG_DEBUG_FS
1943 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1944#endif
1945
565602d7 1946 /* dpll and cdclk state is protected by connection_mutex */
e72f9fbf
DV
1947 int num_shared_dpll;
1948 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
f9476a6c 1949 const struct intel_dpll_mgr *dpll_mgr;
565602d7 1950
fbf6d879
ML
1951 /*
1952 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
1953 * Must be global rather than per dpll, because on some platforms
1954 * plls share registers.
1955 */
1956 struct mutex dpll_lock;
1957
565602d7
ML
1958 unsigned int active_crtcs;
1959 unsigned int min_pixclk[I915_MAX_PIPES];
1960
e4607fcf 1961 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
ee7b9f93 1962
7225342a 1963 struct i915_workarounds workarounds;
888b5995 1964
f99d7069
DV
1965 struct i915_frontbuffer_tracking fb_tracking;
1966
652c393a 1967 u16 orig_clock;
f97108d1 1968
c4804411 1969 bool mchbar_need_disable;
f97108d1 1970
a4da4fa4
DV
1971 struct intel_l3_parity l3_parity;
1972
59124506 1973 /* Cannot be determined by PCIID. You must always read a register. */
3accaf7e 1974 u32 edram_cap;
59124506 1975
c6a828d3 1976 /* gen6+ rps state */
c85aa885 1977 struct intel_gen6_power_mgmt rps;
c6a828d3 1978
20e4d407
DV
1979 /* ilk-only ips/rps state. Everything in here is protected by the global
1980 * mchdev_lock in intel_pm.c */
c85aa885 1981 struct intel_ilk_power_mgmt ips;
b5e50c3f 1982
83c00f55 1983 struct i915_power_domains power_domains;
a38911a3 1984
a031d709 1985 struct i915_psr psr;
3f51e471 1986
99584db3 1987 struct i915_gpu_error gpu_error;
ae681d96 1988
c9cddffc
JB
1989 struct drm_i915_gem_object *vlv_pctx;
1990
0695726e 1991#ifdef CONFIG_DRM_FBDEV_EMULATION
8be48d92
DA
1992 /* list of fbdev register on this device */
1993 struct intel_fbdev *fbdev;
82e3b8c1 1994 struct work_struct fbdev_suspend_work;
4520f53a 1995#endif
e953fd7b
CW
1996
1997 struct drm_property *broadcast_rgb_property;
3f43c48d 1998 struct drm_property *force_audio_property;
e3689190 1999
58fddc28 2000 /* hda/i915 audio component */
51e1d83c 2001 struct i915_audio_component *audio_component;
58fddc28 2002 bool audio_component_registered;
4a21ef7d
LY
2003 /**
2004 * av_mutex - mutex for audio/video sync
2005 *
2006 */
2007 struct mutex av_mutex;
58fddc28 2008
254f965c 2009 uint32_t hw_context_size;
a33afea5 2010 struct list_head context_list;
f4c956ad 2011
3e68320e 2012 u32 fdi_rx_config;
68d18ad7 2013
c231775c 2014 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
70722468 2015 u32 chv_phy_control;
c231775c
VS
2016 /*
2017 * Shadows for CHV DPLL_MD regs to keep the state
 2018 * checker somewhat working in the presence of hardware
2019 * crappiness (can't read out DPLL_MD for pipes B & C).
2020 */
2021 u32 chv_dpll_md[I915_MAX_PIPES];
adc7f04b 2022 u32 bxt_phy_grc;
70722468 2023
842f1c8b 2024 u32 suspend_count;
bc87229f 2025 bool suspended_to_idle;
f4c956ad 2026 struct i915_suspend_saved_registers regfile;
ddeea5b0 2027 struct vlv_s0ix_state vlv_s0ix_state;
231f42a4 2028
656d1b89 2029 enum {
16dcdc4e
PZ
2030 I915_SAGV_UNKNOWN = 0,
2031 I915_SAGV_DISABLED,
2032 I915_SAGV_ENABLED,
2033 I915_SAGV_NOT_CONTROLLED
2034 } sagv_status;
656d1b89 2035
53615a5e
VS
2036 struct {
2037 /*
2038 * Raw watermark latency values:
2039 * in 0.1us units for WM0,
2040 * in 0.5us units for WM1+.
2041 */
2042 /* primary */
2043 uint16_t pri_latency[5];
2044 /* sprite */
2045 uint16_t spr_latency[5];
2046 /* cursor */
2047 uint16_t cur_latency[5];
2af30a5c
PB
2048 /*
2049 * Raw watermark memory latency values
2050 * for SKL for all 8 levels
2051 * in 1us units.
2052 */
2053 uint16_t skl_latency[8];
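 /*
 * Worked example (illustrative, hypothetical values): pri_latency[0] == 34
 * means a WM0 latency of 3.4us, pri_latency[1] == 8 means a WM1 latency of
 * 4us, while skl_latency[] entries are already in plain microseconds.
 */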
609cedef 2054
2d41c0b5
PB
2055 /*
2056 * The skl_wm_values structure is a bit too big for stack
2057 * allocation, so we keep the staging struct where we store
2058 * intermediate results here instead.
2059 */
2060 struct skl_wm_values skl_results;
2061
609cedef 2062 /* current hardware state */
2d41c0b5
PB
2063 union {
2064 struct ilk_wm_values hw;
2065 struct skl_wm_values skl_hw;
0018fda1 2066 struct vlv_wm_values vlv;
2d41c0b5 2067 };
58590c14
VS
2068
2069 uint8_t max_level;
ed4a6a7c
MR
2070
2071 /*
2072 * Should be held around atomic WM register writing; also
 2073 * protects intel_crtc->wm.active and
2074 * cstate->wm.need_postvbl_update.
2075 */
2076 struct mutex wm_mutex;
279e99d7
MR
2077
2078 /*
2079 * Set during HW readout of watermarks/DDB. Some platforms
2080 * need to know when we're still using BIOS-provided values
2081 * (which we don't fully trust).
2082 */
2083 bool distrust_bios_wm;
53615a5e
VS
2084 } wm;
2085
8a187455
PZ
2086 struct i915_runtime_pm pm;
2087
a83014d3
OM
2088 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
2089 struct {
821ed7df 2090 void (*resume)(struct drm_i915_private *);
117897f4 2091 void (*cleanup_engine)(struct intel_engine_cs *engine);
67d97da3
CW
2092
2093 /**
2094 * Is the GPU currently considered idle, or busy executing
2095 * userspace requests? Whilst idle, we allow runtime power
2096 * management to power down the hardware and display clocks.
2097 * In order to reduce the effect on performance, there
2098 * is a slight delay before we do so.
2099 */
2100 unsigned int active_engines;
2101 bool awake;
2102
2103 /**
2104 * We leave the user IRQ off as much as possible,
2105 * but this means that requests will finish and never
2106 * be retired once the system goes idle. Set a timer to
2107 * fire periodically while the ring is running. When it
2108 * fires, go retire requests.
2109 */
2110 struct delayed_work retire_work;
2111
2112 /**
2113 * When we detect an idle GPU, we want to turn on
2114 * powersaving features. So once we see that there
2115 * are no more requests outstanding and no more
2116 * arrive within a small period of time, we fire
2117 * off the idle_work.
2118 */
2119 struct delayed_work idle_work;
de867c20
CW
2120
2121 ktime_t last_init_time;
a83014d3
OM
2122 } gt;
2123
3be60de9
VS
2124 /* perform PHY state sanity checks? */
2125 bool chv_phy_assert[2];
2126
f9318941
PD
2127 /* Used to save the pipe-to-encoder mapping for audio */
2128 struct intel_encoder *av_enc_map[I915_MAX_PIPES];
0bdf5a05 2129
bdf1e7e3
DV
2130 /*
2131 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
2132 * will be rejected. Instead look for a better place.
2133 */
77fec556 2134};
1da177e4 2135
2c1792a1
CW
2136static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
2137{
091387c1 2138 return container_of(dev, struct drm_i915_private, drm);
2c1792a1
CW
2139}
2140
c49d13ee 2141static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
888d0d42 2142{
c49d13ee 2143 return to_i915(dev_get_drvdata(kdev));
888d0d42
ID
2144}
2145
33a732f4
AD
2146static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
2147{
2148 return container_of(guc, struct drm_i915_private, guc);
2149}
2150
b4ac5afc 2151/* Simple iterator over all initialised engines */
3b3f1650
AG
2152#define for_each_engine(engine__, dev_priv__, id__) \
2153 for ((id__) = 0; \
2154 (id__) < I915_NUM_ENGINES; \
2155 (id__)++) \
2156 for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
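/*
 * Illustrative sketch (not part of this header): typical use of the iterator
 * above. 'engine' and 'id' are ordinary locals; the loop body is made up for
 * the example.
 */
#if 0
static void example_list_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		DRM_DEBUG_DRIVER("engine %d: %s\n", id, engine->name);
}
#endif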
c3232b18 2157
bafb0fce
CW
2158#define __mask_next_bit(mask) ({ \
2159 int __idx = ffs(mask) - 1; \
2160 mask &= ~BIT(__idx); \
2161 __idx; \
2162})
2163
c3232b18 2164/* Iterator over subset of engines selected by mask */
bafb0fce
CW
2165#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
2166 for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
3b3f1650 2167 tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
ee4b6faf 2168
b1d7e4b4
WF
2169enum hdmi_force_audio {
2170 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
2171 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
2172 HDMI_AUDIO_AUTO, /* trust EDID */
2173 HDMI_AUDIO_ON, /* force turn on HDMI audio */
2174};
2175
190d6cd5 2176#define I915_GTT_OFFSET_NONE ((u32)-1)
ed2f3452 2177
37e680a1 2178struct drm_i915_gem_object_ops {
de472664
CW
2179 unsigned int flags;
2180#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
2181
37e680a1
CW
2182 /* Interface between the GEM object and its backing storage.
2183 * get_pages() is called once prior to the use of the associated set
 2184 * of pages before binding them into the GTT, and put_pages() is
2185 * called after we no longer need them. As we expect there to be
2186 * associated cost with migrating pages between the backing storage
2187 * and making them available for the GPU (e.g. clflush), we may hold
2188 * onto the pages after they are no longer referenced by the GPU
2189 * in case they may be used again shortly (for example migrating the
2190 * pages to a different memory domain within the GTT). put_pages()
2191 * will therefore most likely be called when the object itself is
2192 * being released or under memory pressure (where we attempt to
2193 * reap pages for the shrinker).
2194 */
03ac84f1
CW
2195 struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
2196 void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
de472664 2197
5cc9ed4b
CW
2198 int (*dmabuf_export)(struct drm_i915_gem_object *);
2199 void (*release)(struct drm_i915_gem_object *);
37e680a1
CW
2200};
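/*
 * Illustrative sketch (assumption, not a real backend): a minimal
 * drm_i915_gem_object_ops table for a backend whose pages have struct page
 * backing. The callback names are hypothetical; the real tables live in
 * i915_gem.c and friends.
 */
#if 0
static struct sg_table *example_get_pages(struct drm_i915_gem_object *obj);
static void example_put_pages(struct drm_i915_gem_object *obj,
			      struct sg_table *pages);
static void example_release(struct drm_i915_gem_object *obj);

static const struct drm_i915_gem_object_ops example_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = example_get_pages,
	.put_pages = example_put_pages,
	.release = example_release,
};
#endif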
2201
a071fa00
DV
2202/*
2203 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
d1b9d039 2204 * considered to be the frontbuffer for the given plane interface-wise. This
a071fa00
DV
2205 * doesn't mean that the hw necessarily already scans it out, but that any
2206 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
2207 *
2208 * We have one bit per pipe and per scanout plane type.
2209 */
d1b9d039
SAK
2210#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
2211#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
a071fa00
DV
2212#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
2213 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
2214#define INTEL_FRONTBUFFER_CURSOR(pipe) \
d1b9d039
SAK
2215 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2216#define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
2217 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
a071fa00 2218#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
d1b9d039 2219 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
cc36513c 2220#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
d1b9d039 2221 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
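/*
 * Worked example (illustrative): with INTEL_FRONTBUFFER_BITS_PER_PIPE == 8,
 * pipe A occupies bits 0-7 (primary = bit 0, cursor = bit 1, sprites 0-4 =
 * bits 2-6, overlay = bit 7) and INTEL_FRONTBUFFER_ALL_MASK(PIPE_A) == 0xff;
 * pipe B uses bits 8-15, and so on.
 */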
a071fa00 2222
673a394b 2223struct drm_i915_gem_object {
c397b908 2224 struct drm_gem_object base;
673a394b 2225
37e680a1
CW
2226 const struct drm_i915_gem_object_ops *ops;
2227
2f633156
BW
2228 /** List of VMAs backed by this object */
2229 struct list_head vma_list;
2230
c1ad11fc
CW
2231 /** Stolen memory for this object, instead of being backed by shmem. */
2232 struct drm_mm_node *stolen;
35c20a60 2233 struct list_head global_list;
fbbd37b3
CW
2234 union {
2235 struct rcu_head rcu;
2236 struct llist_node freed;
2237 };
673a394b 2238
275f039d
CW
2239 /**
2240 * Whether the object is currently in the GGTT mmap.
2241 */
2242 struct list_head userfault_link;
2243
b25cb2f8
BW
2244 /** Used in execbuf to temporarily hold a ref */
2245 struct list_head obj_exec_link;
673a394b 2246
8d9d5744 2247 struct list_head batch_pool_link;
493018dc 2248
573adb39 2249 unsigned long flags;
673a394b 2250
f8a7fde4
CW
2251 /**
2252 * Have we taken a reference for the object for incomplete GPU
2253 * activity?
2254 */
d07f0e59 2255#define I915_BO_ACTIVE_REF 0
f8a7fde4 2256
24f3a8cf
AG
2257 /*
 2258 * Is the object to be mapped as read-only to the GPU?
 2259 * Only honoured if the hardware has the relevant pte bit.
2260 */
2261 unsigned long gt_ro:1;
651d794f 2262 unsigned int cache_level:3;
0f71979a 2263 unsigned int cache_dirty:1;
93dfb40c 2264
faf5bf0a 2265 atomic_t frontbuffer_bits;
50349247 2266 unsigned int frontbuffer_ggtt_origin; /* write once */
a071fa00 2267
9ad36761 2268 /** Current tiling stride for the object, if it's tiled. */
3e510a8e
CW
2269 unsigned int tiling_and_stride;
2270#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
2271#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
2272#define STRIDE_MASK (~TILING_MASK)
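/*
 * Worked example (illustrative): FENCE_MINIMUM_STRIDE is 128, so TILING_MASK
 * is 0x7f; the low 7 bits of tiling_and_stride hold the I915_TILING_* mode
 * and the remaining bits hold the stride, which is therefore always a
 * multiple of 128 bytes for tiled objects. See
 * i915_gem_object_get_tiling()/_stride() below.
 */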
9ad36761 2273
15717de2
CW
2274 /** Count of VMA actually bound by this object */
2275 unsigned int bind_count;
d07f0e59 2276 unsigned int active_count;
8a0c39b1
TU
2277 unsigned int pin_display;
2278
a4f5ea64 2279 struct {
1233e2db
CW
2280 struct mutex lock; /* protects the pages and their use */
2281 atomic_t pages_pin_count;
a4f5ea64
CW
2282
2283 struct sg_table *pages;
2284 void *mapping;
96d77634 2285
a4f5ea64
CW
2286 struct i915_gem_object_page_iter {
2287 struct scatterlist *sg_pos;
2288 unsigned int sg_idx; /* in pages, but 32bit eek! */
2289
2290 struct radix_tree_root radix;
2291 struct mutex lock; /* protects this cache */
2292 } get_page;
2293
2294 /**
2295 * Advice: are the backing pages purgeable?
2296 */
2297 unsigned int madv:2;
2298
2299 /**
2300 * This is set if the object has been written to since the
2301 * pages were last acquired.
2302 */
2303 bool dirty:1;
2304 } mm;
9a70cc2a 2305
b4716185
CW
2306 /** Breadcrumb of last rendering to the buffer.
2307 * There can only be one writer, but we allow for multiple readers.
 2308 * If there is a writer, that necessarily implies that all other
2309 * read requests are complete - but we may only be lazily clearing
2310 * the read requests. A read request is naturally the most recent
2311 * request on a ring, so we may have two different write and read
2312 * requests on one ring where the write request is older than the
2313 * read request. This allows for the CPU to read from an active
2314 * buffer by only waiting for the write to complete.
381f371b 2315 */
d07f0e59 2316 struct reservation_object *resv;
673a394b 2317
80075d49
DV
2318 /** References from framebuffers, locks out tiling changes. */
2319 unsigned long framebuffer_references;
2320
280b713b 2321 /** Record of address bit 17 of each page at last unbind. */
d312ec25 2322 unsigned long *bit_17;
280b713b 2323
5f12b80a
CW
2324 struct i915_gem_userptr {
2325 uintptr_t ptr;
2326 unsigned read_only :1;
5cc9ed4b 2327
5f12b80a
CW
2328 struct i915_mm_struct *mm;
2329 struct i915_mmu_object *mmu_object;
2330 struct work_struct *work;
2331 } userptr;
2332
2333 /** for phys allocated objects */
2334 struct drm_dma_handle *phys_handle;
d07f0e59
CW
2335
2336 struct reservation_object __builtin_resv;
5cc9ed4b 2337};
03ac0642
CW
2338
2339static inline struct drm_i915_gem_object *
2340to_intel_bo(struct drm_gem_object *gem)
2341{
2342 /* Assert that to_intel_bo(NULL) == NULL */
2343 BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
2344
2345 return container_of(gem, struct drm_i915_gem_object, base);
2346}
2347
fbbd37b3
CW
2348/**
2349 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 2350 * @file: DRM file private data
2351 * @handle: userspace handle
2352 *
2353 * Returns:
2354 *
 2355 * A pointer to the object named by the handle if such exists on @file, NULL
2356 * otherwise. This object is only valid whilst under the RCU read lock, and
2357 * note carefully the object may be in the process of being destroyed.
2358 */
2359static inline struct drm_i915_gem_object *
2360i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
2361{
2362#ifdef CONFIG_LOCKDEP
2363 WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
2364#endif
2365 return idr_find(&file->object_idr, handle);
2366}
2367
03ac0642
CW
2368static inline struct drm_i915_gem_object *
2369i915_gem_object_lookup(struct drm_file *file, u32 handle)
2370{
fbbd37b3
CW
2371 struct drm_i915_gem_object *obj;
2372
2373 rcu_read_lock();
2374 obj = i915_gem_object_lookup_rcu(file, handle);
2375 if (obj && !kref_get_unless_zero(&obj->base.refcount))
2376 obj = NULL;
2377 rcu_read_unlock();
2378
2379 return obj;
03ac0642
CW
2380}
2381
2382__deprecated
2383extern struct drm_gem_object *
2384drm_gem_object_lookup(struct drm_file *file, u32 handle);
23010e43 2385
25dc556a
CW
2386__attribute__((nonnull))
2387static inline struct drm_i915_gem_object *
2388i915_gem_object_get(struct drm_i915_gem_object *obj)
2389{
2390 drm_gem_object_reference(&obj->base);
2391 return obj;
2392}
2393
2394__deprecated
2395extern void drm_gem_object_reference(struct drm_gem_object *);
2396
f8c417cd
CW
2397__attribute__((nonnull))
2398static inline void
2399i915_gem_object_put(struct drm_i915_gem_object *obj)
2400{
f0cd5182 2401 __drm_gem_object_unreference(&obj->base);
f8c417cd
CW
2402}
2403
2404__deprecated
2405extern void drm_gem_object_unreference(struct drm_gem_object *);
2406
34911fd3
CW
2407__deprecated
2408extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
2409
03ac84f1
CW
2410static inline bool
2411i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
2412{
2413 return atomic_read(&obj->base.refcount.refcount) == 0;
2414}
2415
b9bcd14a
CW
2416static inline bool
2417i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
2418{
2419 return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
2420}
2421
573adb39
CW
2422static inline bool
2423i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
2424{
d07f0e59 2425 return obj->active_count;
573adb39
CW
2426}
2427
f8a7fde4
CW
2428static inline bool
2429i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
2430{
2431 return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
2432}
2433
2434static inline void
2435i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
2436{
2437 lockdep_assert_held(&obj->base.dev->struct_mutex);
2438 __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
2439}
2440
2441static inline void
2442i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
2443{
2444 lockdep_assert_held(&obj->base.dev->struct_mutex);
2445 __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
2446}
2447
2448void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
2449
3e510a8e
CW
2450static inline unsigned int
2451i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
2452{
2453 return obj->tiling_and_stride & TILING_MASK;
2454}
2455
2456static inline bool
2457i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
2458{
2459 return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
2460}
2461
2462static inline unsigned int
2463i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
2464{
2465 return obj->tiling_and_stride & STRIDE_MASK;
2466}
2467
d07f0e59
CW
2468static inline struct intel_engine_cs *
2469i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
2470{
2471 struct intel_engine_cs *engine = NULL;
2472 struct dma_fence *fence;
2473
2474 rcu_read_lock();
2475 fence = reservation_object_get_excl_rcu(obj->resv);
2476 rcu_read_unlock();
2477
2478 if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
2479 engine = to_request(fence)->engine;
2480 dma_fence_put(fence);
2481
2482 return engine;
2483}
2484
624192cf
CW
2485static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
2486{
2487 i915_gem_object_get(vma->obj);
2488 return vma;
2489}
2490
2491static inline void i915_vma_put(struct i915_vma *vma)
2492{
624192cf
CW
2493 i915_gem_object_put(vma->obj);
2494}
2495
85d1225e
DG
2496/*
2497 * Optimised SGL iterator for GEM objects
2498 */
2499static __always_inline struct sgt_iter {
2500 struct scatterlist *sgp;
2501 union {
2502 unsigned long pfn;
2503 dma_addr_t dma;
2504 };
2505 unsigned int curr;
2506 unsigned int max;
2507} __sgt_iter(struct scatterlist *sgl, bool dma) {
2508 struct sgt_iter s = { .sgp = sgl };
2509
2510 if (s.sgp) {
2511 s.max = s.curr = s.sgp->offset;
2512 s.max += s.sgp->length;
2513 if (dma)
2514 s.dma = sg_dma_address(s.sgp);
2515 else
2516 s.pfn = page_to_pfn(sg_page(s.sgp));
2517 }
2518
2519 return s;
2520}
2521
96d77634
CW
2522static inline struct scatterlist *____sg_next(struct scatterlist *sg)
2523{
2524 ++sg;
2525 if (unlikely(sg_is_chain(sg)))
2526 sg = sg_chain_ptr(sg);
2527 return sg;
2528}
2529
63d15326
DG
2530/**
2531 * __sg_next - return the next scatterlist entry in a list
2532 * @sg: The current sg entry
2533 *
2534 * Description:
2535 * If the entry is the last, return NULL; otherwise, step to the next
2536 * element in the array (@sg@+1). If that's a chain pointer, follow it;
2537 * otherwise just return the pointer to the current element.
2538 **/
2539static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2540{
2541#ifdef CONFIG_DEBUG_SG
2542 BUG_ON(sg->sg_magic != SG_MAGIC);
2543#endif
96d77634 2544 return sg_is_last(sg) ? NULL : ____sg_next(sg);
63d15326
DG
2545}
2546
85d1225e
DG
2547/**
2548 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2549 * @__dmap: DMA address (output)
2550 * @__iter: 'struct sgt_iter' (iterator state, internal)
2551 * @__sgt: sg_table to iterate over (input)
2552 */
2553#define for_each_sgt_dma(__dmap, __iter, __sgt) \
2554 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
2555 ((__dmap) = (__iter).dma + (__iter).curr); \
2556 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
63d15326 2557 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
85d1225e
DG
2558
2559/**
2560 * for_each_sgt_page - iterate over the pages of the given sg_table
2561 * @__pp: page pointer (output)
2562 * @__iter: 'struct sgt_iter' (iterator state, internal)
2563 * @__sgt: sg_table to iterate over (input)
2564 */
2565#define for_each_sgt_page(__pp, __iter, __sgt) \
2566 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
2567 ((__pp) = (__iter).pfn == 0 ? NULL : \
2568 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2569 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
63d15326 2570 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
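/*
 * Illustrative sketch (not part of this header): walking every backing page
 * of an object with the iterator above. The helper is made up for the
 * example; obj->mm.pages must already be pinned by the caller.
 */
#if 0
static void example_mark_pages_dirty(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	for_each_sgt_page(page, sgt_iter, obj->mm.pages)
		set_page_dirty(page);
}
#endif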
a071fa00 2571
351e3db2
BV
2572/*
2573 * A command that requires special handling by the command parser.
2574 */
2575struct drm_i915_cmd_descriptor {
2576 /*
2577 * Flags describing how the command parser processes the command.
2578 *
2579 * CMD_DESC_FIXED: The command has a fixed length if this is set,
2580 * a length mask if not set
2581 * CMD_DESC_SKIP: The command is allowed but does not follow the
2582 * standard length encoding for the opcode range in
2583 * which it falls
2584 * CMD_DESC_REJECT: The command is never allowed
2585 * CMD_DESC_REGISTER: The command should be checked against the
2586 * register whitelist for the appropriate ring
2587 * CMD_DESC_MASTER: The command is allowed if the submitting process
2588 * is the DRM master
2589 */
2590 u32 flags;
2591#define CMD_DESC_FIXED (1<<0)
2592#define CMD_DESC_SKIP (1<<1)
2593#define CMD_DESC_REJECT (1<<2)
2594#define CMD_DESC_REGISTER (1<<3)
2595#define CMD_DESC_BITMASK (1<<4)
2596#define CMD_DESC_MASTER (1<<5)
2597
2598 /*
2599 * The command's unique identification bits and the bitmask to get them.
2600 * This isn't strictly the opcode field as defined in the spec and may
2601 * also include type, subtype, and/or subop fields.
2602 */
2603 struct {
2604 u32 value;
2605 u32 mask;
2606 } cmd;
2607
2608 /*
2609 * The command's length. The command is either fixed length (i.e. does
2610 * not include a length field) or has a length field mask. The flag
2611 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
2612 * a length mask. All command entries in a command table must include
2613 * length information.
2614 */
2615 union {
2616 u32 fixed;
2617 u32 mask;
2618 } length;
2619
2620 /*
2621 * Describes where to find a register address in the command to check
2622 * against the ring's register whitelist. Only valid if flags has the
2623 * CMD_DESC_REGISTER bit set.
6a65c5b9
FJ
2624 *
2625 * A non-zero step value implies that the command may access multiple
2626 * registers in sequence (e.g. LRI), in that case step gives the
2627 * distance in dwords between individual offset fields.
351e3db2
BV
2628 */
2629 struct {
2630 u32 offset;
2631 u32 mask;
6a65c5b9 2632 u32 step;
351e3db2
BV
2633 } reg;
2634
2635#define MAX_CMD_DESC_BITMASKS 3
2636 /*
2637 * Describes command checks where a particular dword is masked and
2638 * compared against an expected value. If the command does not match
2639 * the expected value, the parser rejects it. Only valid if flags has
2640 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
2641 * are valid.
d4d48035
BV
2642 *
2643 * If the check specifies a non-zero condition_mask then the parser
2644 * only performs the check when the bits specified by condition_mask
2645 * are non-zero.
351e3db2
BV
2646 */
2647 struct {
2648 u32 offset;
2649 u32 mask;
2650 u32 expected;
d4d48035
BV
2651 u32 condition_offset;
2652 u32 condition_mask;
351e3db2
BV
2653 } bits[MAX_CMD_DESC_BITMASKS];
2654};
2655
2656/*
2657 * A table of commands requiring special handling by the command parser.
2658 *
33a051a5
CW
2659 * Each engine has an array of tables. Each table consists of an array of
2660 * command descriptors, which must be sorted with command opcodes in
2661 * ascending order.
351e3db2
BV
2662 */
2663struct drm_i915_cmd_table {
2664 const struct drm_i915_cmd_descriptor *table;
2665 int count;
2666};
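/*
 * Illustrative sketch (assumption, made-up values): what a command-table
 * entry built from the structures above might look like. Real tables are
 * defined in i915_cmd_parser.c; the opcode value/mask, length mask and
 * register offset below are hypothetical.
 */
#if 0
static const struct drm_i915_cmd_descriptor example_cmds[] = {
	{
		.flags = CMD_DESC_REGISTER, /* validate a register against the whitelist */
		.cmd = { .value = 0x11000000, .mask = 0xffff0000 }, /* hypothetical opcode */
		.length = { .mask = 0x3f }, /* variable length, 6-bit length field */
		.reg = { .offset = 1, .mask = 0x007ffffc, .step = 0 },
	},
};

static const struct drm_i915_cmd_table example_cmd_tables[] = {
	{ example_cmds, ARRAY_SIZE(example_cmds) },
};
#endif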
2667
dbbe9127 2668/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
7312e2dd
CW
2669#define __I915__(p) ({ \
2670 struct drm_i915_private *__p; \
2671 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
2672 __p = (struct drm_i915_private *)p; \
2673 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
2674 __p = to_i915((struct drm_device *)p); \
2675 else \
2676 BUILD_BUG(); \
2677 __p; \
2678})
351c3b53 2679#define INTEL_INFO(p) (&__I915__(p)->info)
50a0bc90 2680
55b8f2a7 2681#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
50a0bc90 2682#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
cae5852d 2683
e87a005d 2684#define REVID_FOREVER 0xff
091387c1 2685#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
ac657f64
TU
2686
2687#define GEN_FOREVER (0)
2688/*
2689 * Returns true if Gen is in inclusive range [Start, End].
2690 *
 2691 * Use GEN_FOREVER for an unbound start and/or end.
2692 */
c1812bdb 2693#define IS_GEN(dev_priv, s, e) ({ \
ac657f64
TU
2694 unsigned int __s = (s), __e = (e); \
2695 BUILD_BUG_ON(!__builtin_constant_p(s)); \
2696 BUILD_BUG_ON(!__builtin_constant_p(e)); \
2697 if ((__s) != GEN_FOREVER) \
2698 __s = (s) - 1; \
2699 if ((__e) == GEN_FOREVER) \
2700 __e = BITS_PER_LONG - 1; \
2701 else \
2702 __e = (e) - 1; \
c1812bdb 2703 !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
ac657f64
TU
2704})
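/*
 * Usage examples (illustrative): IS_GEN(dev_priv, 8, 9) is true only on gen8
 * and gen9; IS_GEN(dev_priv, GEN_FOREVER, 7) matches everything up to and
 * including gen7; IS_GEN(dev_priv, 9, GEN_FOREVER) matches gen9 onwards.
 */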
2705
e87a005d
JN
2706/*
2707 * Return true if revision is in range [since,until] inclusive.
2708 *
2709 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
2710 */
2711#define IS_REVID(p, since, until) \
2712 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
2713
50a0bc90
TU
2714#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
2715#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
cae5852d 2716#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
50a0bc90 2717#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
cae5852d 2718#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
50a0bc90
TU
2719#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
2720#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
cae5852d
ZN
2721#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
2722#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
2723#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
50a0bc90 2724#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
9beb5fea 2725#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
50a0bc90
TU
2726#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
2727#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
cae5852d
ZN
2728#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
2729#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
50a0bc90 2730#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
fd6b8f43 2731#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
50a0bc90
TU
2732#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
2733 INTEL_DEVID(dev_priv) == 0x0152 || \
2734 INTEL_DEVID(dev_priv) == 0x015a)
11a914c2 2735#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview)
920a14b2 2736#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview)
772c2a51 2737#define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell)
8652744b 2738#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
d9486e65 2739#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
e2d214ae 2740#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
0853723b 2741#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
cae5852d 2742#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
50a0bc90
TU
2743#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
2744 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
2745#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
2746 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
2747 (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
2748 (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
ebb72aad 2749/* ULX machines are also considered ULT. */
50a0bc90
TU
2750#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
2751 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
2752#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
2753 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
2754#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
2755 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
2756#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
2757 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
9bbfd20a 2758/* ULX machines are also considered ULT. */
50a0bc90
TU
2759#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
2760 INTEL_DEVID(dev_priv) == 0x0A1E)
2761#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
2762 INTEL_DEVID(dev_priv) == 0x1913 || \
2763 INTEL_DEVID(dev_priv) == 0x1916 || \
2764 INTEL_DEVID(dev_priv) == 0x1921 || \
2765 INTEL_DEVID(dev_priv) == 0x1926)
2766#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
2767 INTEL_DEVID(dev_priv) == 0x1915 || \
2768 INTEL_DEVID(dev_priv) == 0x191E)
2769#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
2770 INTEL_DEVID(dev_priv) == 0x5913 || \
2771 INTEL_DEVID(dev_priv) == 0x5916 || \
2772 INTEL_DEVID(dev_priv) == 0x5921 || \
2773 INTEL_DEVID(dev_priv) == 0x5926)
2774#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
2775 INTEL_DEVID(dev_priv) == 0x5915 || \
2776 INTEL_DEVID(dev_priv) == 0x591E)
2777#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
2778 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
2779#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
2780 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
7a58bad0 2781
b833d685 2782#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
cae5852d 2783
ef712bb4
JN
2784#define SKL_REVID_A0 0x0
2785#define SKL_REVID_B0 0x1
2786#define SKL_REVID_C0 0x2
2787#define SKL_REVID_D0 0x3
2788#define SKL_REVID_E0 0x4
2789#define SKL_REVID_F0 0x5
4ba9c1f7
MK
2790#define SKL_REVID_G0 0x6
2791#define SKL_REVID_H0 0x7
ef712bb4 2792
e87a005d
JN
2793#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
2794
ef712bb4 2795#define BXT_REVID_A0 0x0
fffda3f4 2796#define BXT_REVID_A1 0x1
ef712bb4
JN
2797#define BXT_REVID_B0 0x3
2798#define BXT_REVID_C0 0x9
6c74c87f 2799
e2d214ae
TU
2800#define IS_BXT_REVID(dev_priv, since, until) \
2801 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
e87a005d 2802
c033a37c
MK
2803#define KBL_REVID_A0 0x0
2804#define KBL_REVID_B0 0x1
fe905819
MK
2805#define KBL_REVID_C0 0x2
2806#define KBL_REVID_D0 0x3
2807#define KBL_REVID_E0 0x4
c033a37c 2808
0853723b
TU
2809#define IS_KBL_REVID(dev_priv, since, until) \
2810 (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
c033a37c 2811
85436696
JB
2812/*
2813 * The genX designation typically refers to the render engine, so render
2814 * capability related checks should use IS_GEN, while display and other checks
2815 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2816 * chips, etc.).
2817 */
5db94019
TU
2818#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
2819#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
2820#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
2821#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
2822#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
2823#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
2824#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
2825#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
cae5852d 2826
a19d6ff2
TU
2827#define ENGINE_MASK(id) BIT(id)
2828#define RENDER_RING ENGINE_MASK(RCS)
2829#define BSD_RING ENGINE_MASK(VCS)
2830#define BLT_RING ENGINE_MASK(BCS)
2831#define VEBOX_RING ENGINE_MASK(VECS)
2832#define BSD2_RING ENGINE_MASK(VCS2)
2833#define ALL_ENGINES (~0)
2834
2835#define HAS_ENGINE(dev_priv, id) \
af1346a0 2836 (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
a19d6ff2
TU
2837
2838#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
2839#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
2840#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
2841#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
2842
63c42e56 2843#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
ca377809 2844#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
af1346a0 2845#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
8652744b
TU
2846#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
2847 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
3177659a 2848#define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical)
cae5852d 2849
e1a52536 2850#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts)
4586f1d0 2851#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts)
692ef70c 2852#define USES_PPGTT(dev) (i915.enable_ppgtt)
81ba8aef
MT
2853#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
2854#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
1d2a314c 2855
05394f39 2856#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
cae5852d
ZN
2857#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
2858
b45305fc 2859/* Early gen2 have a totally busted CS tlb and require pinned batches. */
50a0bc90 2860#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
06e668ac
MK
2861
2862/* WaRsDisableCoarsePowerGating:skl,bxt */
61251512
TU
2863#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2864 (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
2865 IS_SKL_GT3(dev_priv) || \
2866 IS_SKL_GT4(dev_priv))
185c66e5 2867
4e6b788c
DV
2868/*
 2869 * dp aux and gmbus irqs on gen4 seem to be able to generate legacy interrupts
2870 * even when in MSI mode. This results in spurious interrupt warnings if the
2871 * legacy irq no. is shared with another device. The kernel then disables that
2872 * interrupt source and so prevents the other device from working properly.
2873 */
2874#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
b355f109 2875#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
b45305fc 2876
cae5852d
ZN
2877/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
2878 * rows, which changed the alignment requirements and fence programming.
2879 */
50a0bc90
TU
2880#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
2881 !(IS_I915G(dev_priv) || \
2882 IS_I915GM(dev_priv)))
cae5852d
ZN
2883#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
2884#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
cae5852d
ZN
2885
2886#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
2887#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
3a77c4c4 2888#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
cae5852d 2889
50a0bc90 2890#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
f5adf94e 2891
1d3fe53b 2892#define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst)
0c9b3715 2893
4f8036a2 2894#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
30568c45 2895#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
6e3b84d8 2896#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr)
86f3624b 2897#define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
33b5bf82 2898#define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p)
affa9354 2899
3bacde19 2900#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
eb805623 2901
6772ffe0 2902#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
1a3d1898
DG
2903/*
2904 * For now, anything with a GuC requires uCode loading, and then supports
2905 * command submission once loaded. But these are logically independent
2906 * properties, so we have separate macros to test them.
2907 */
3d810fbe 2908#define HAS_GUC(dev) (INTEL_INFO(dev)->has_guc)
1a3d1898
DG
2909#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
2910#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
33a732f4 2911
53233f08 2912#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
a9ed33ca 2913
33e141ed 2914#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
2915
17a303ec
PZ
2916#define INTEL_PCH_DEVICE_ID_MASK 0xff00
2917#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
2918#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
2919#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
2920#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
2921#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
e7e7ea20
S
2922#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
2923#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
22dea0be 2924#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
30c964a6 2925#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
1844a66b 2926#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
39bfcd52 2927#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
17a303ec 2928
6e266956
TU
2929#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
2930#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
2931#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
2932#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
4f8036a2
TU
2933#define HAS_PCH_LPT_LP(dev_priv) \
2934 ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
2935#define HAS_PCH_LPT_H(dev_priv) \
2936 ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
6e266956
TU
2937#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
2938#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
2939#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
2940#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
cae5852d 2941
49cff963 2942#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
5fafe292 2943
6389dd83
SS
2944#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
2945
040d2baa 2946/* DPF == dynamic parity feature */
3c9192bc 2947#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
50a0bc90
TU
2948#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2949 2 : HAS_L3_DPF(dev_priv))
e1ef7cc2 2950
c8735b0c 2951#define GT_FREQUENCY_MULTIPLIER 50
de43ae9d 2952#define GEN9_FREQ_SCALER 3
c8735b0c 2953
05394f39
CW
2954#include "i915_trace.h"
2955
48f112fe
CW
2956static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2957{
2958#ifdef CONFIG_INTEL_IOMMU
2959 if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
2960 return true;
2961#endif
2962 return false;
2963}
2964
1751fcf9
ML
2965extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
2966extern int i915_resume_switcheroo(struct drm_device *dev);
7c1c2871 2967
c033666a 2968int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
351c3b53 2969 int enable_ppgtt);
0e4ca100 2970
39df9190
CW
2971bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
2972
0673ad47 2973/* i915_drv.c */
d15d7538
ID
2974void __printf(3, 4)
2975__i915_printk(struct drm_i915_private *dev_priv, const char *level,
2976 const char *fmt, ...);
2977
2978#define i915_report_error(dev_priv, fmt, ...) \
2979 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
2980
c43b5634 2981#ifdef CONFIG_COMPAT
0d6aa60b
DA
2982extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2983 unsigned long arg);
c43b5634 2984#endif
efab0698
JN
2985extern const struct dev_pm_ops i915_pm_ops;
2986
2987extern int i915_driver_load(struct pci_dev *pdev,
2988 const struct pci_device_id *ent);
2989extern void i915_driver_unload(struct drm_device *dev);
dc97997a
CW
2990extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
2991extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
780f262a 2992extern void i915_reset(struct drm_i915_private *dev_priv);
6b332fa2 2993extern int intel_guc_reset(struct drm_i915_private *dev_priv);
fc0768ce 2994extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
7648fa99
JB
2995extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2996extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2997extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2998extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
650ad970 2999int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
7648fa99 3000
77913b39 3001/* intel_hotplug.c */
91d14251
TU
3002void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
3003 u32 pin_mask, u32 long_mask);
77913b39
JN
3004void intel_hpd_init(struct drm_i915_private *dev_priv);
3005void intel_hpd_init_work(struct drm_i915_private *dev_priv);
3006void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
cc24fcdc 3007bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
b236d7c8
L
3008bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
3009void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
77913b39 3010
1da177e4 3011/* i915_irq.c */
26a02b8f
CW
3012static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
3013{
3014 unsigned long delay;
3015
3016 if (unlikely(!i915.enable_hangcheck))
3017 return;
3018
3019 /* Don't continually defer the hangcheck so that it is always run at
3020 * least once after work has been scheduled on any ring. Otherwise,
3021 * we will ignore a hung ring if a second ring is kept busy.
3022 */
3023
3024 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
3025 queue_delayed_work(system_long_wq,
3026 &dev_priv->gpu_error.hangcheck_work, delay);
3027}
3028
58174462 3029__printf(3, 4)
c033666a
CW
3030void i915_handle_error(struct drm_i915_private *dev_priv,
3031 u32 engine_mask,
58174462 3032 const char *fmt, ...);
1da177e4 3033
b963291c 3034extern void intel_irq_init(struct drm_i915_private *dev_priv);
2aeb7d3a
DV
3035int intel_irq_install(struct drm_i915_private *dev_priv);
3036void intel_irq_uninstall(struct drm_i915_private *dev_priv);
907b28c5 3037
dc97997a
CW
3038extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
3039extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
10018603 3040 bool restore_forcewake);
dc97997a 3041extern void intel_uncore_init(struct drm_i915_private *dev_priv);
fc97618b 3042extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
bc3b9346 3043extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
dc97997a
CW
3044extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
3045extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
3046 bool restore);
48c1026a 3047const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
59bad947 3048void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
48c1026a 3049 enum forcewake_domains domains);
59bad947 3050void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
48c1026a 3051 enum forcewake_domains domains);
a6111f7b
CW
3052/* Like above but the caller must manage the uncore.lock itself.
3053 * Must be used with I915_READ_FW and friends.
3054 */
3055void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
3056 enum forcewake_domains domains);
3057void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
3058 enum forcewake_domains domains);
3accaf7e
MK
3059u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
3060
59bad947 3061void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
0ad35fed 3062
1758b90e
CW
3063int intel_wait_for_register(struct drm_i915_private *dev_priv,
3064 i915_reg_t reg,
3065 const u32 mask,
3066 const u32 value,
3067 const unsigned long timeout_ms);
3068int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
3069 i915_reg_t reg,
3070 const u32 mask,
3071 const u32 value,
3072 const unsigned long timeout_ms);
3073
0ad35fed
ZW
3074static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
3075{
feddf6e8 3076 return dev_priv->gvt;
0ad35fed
ZW
3077}
3078
c033666a 3079static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
cf9d2890 3080{
c033666a 3081 return dev_priv->vgpu.active;
cf9d2890 3082}
b1f14ad0 3083
7c463586 3084void
50227e1c 3085i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
755e9019 3086 u32 status_mask);
7c463586
KP
3087
3088void
50227e1c 3089i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
755e9019 3090 u32 status_mask);
7c463586 3091
f8b79e58
ID
3092void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
3093void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
0706f17c
EE
3094void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
3095 uint32_t mask,
3096 uint32_t bits);
fbdedaea
VS
3097void ilk_update_display_irq(struct drm_i915_private *dev_priv,
3098 uint32_t interrupt_mask,
3099 uint32_t enabled_irq_mask);
3100static inline void
3101ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
3102{
3103 ilk_update_display_irq(dev_priv, bits, bits);
3104}
3105static inline void
3106ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
3107{
3108 ilk_update_display_irq(dev_priv, bits, 0);
3109}
013d3752
VS
3110void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
3111 enum pipe pipe,
3112 uint32_t interrupt_mask,
3113 uint32_t enabled_irq_mask);
3114static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
3115 enum pipe pipe, uint32_t bits)
3116{
3117 bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
3118}
3119static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
3120 enum pipe pipe, uint32_t bits)
3121{
3122 bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
3123}
47339cd9
DV
3124void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
3125 uint32_t interrupt_mask,
3126 uint32_t enabled_irq_mask);
14443261
VS
3127static inline void
3128ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3129{
3130 ibx_display_interrupt_update(dev_priv, bits, bits);
3131}
3132static inline void
3133ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3134{
3135 ibx_display_interrupt_update(dev_priv, bits, 0);
3136}
3137
673a394b 3138/* i915_gem.c */
673a394b
EA
3139int i915_gem_create_ioctl(struct drm_device *dev, void *data,
3140 struct drm_file *file_priv);
3141int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
3142 struct drm_file *file_priv);
3143int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
3144 struct drm_file *file_priv);
3145int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
3146 struct drm_file *file_priv);
de151cf6
JB
3147int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
3148 struct drm_file *file_priv);
673a394b
EA
3149int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
3150 struct drm_file *file_priv);
3151int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
3152 struct drm_file *file_priv);
3153int i915_gem_execbuffer(struct drm_device *dev, void *data,
3154 struct drm_file *file_priv);
76446cac
JB
3155int i915_gem_execbuffer2(struct drm_device *dev, void *data,
3156 struct drm_file *file_priv);
673a394b
EA
3157int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3158 struct drm_file *file_priv);
199adf40
BW
3159int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3160 struct drm_file *file);
3161int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3162 struct drm_file *file);
673a394b
EA
3163int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3164 struct drm_file *file_priv);
3ef94daa
CW
3165int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3166 struct drm_file *file_priv);
673a394b
EA
3167int i915_gem_set_tiling(struct drm_device *dev, void *data,
3168 struct drm_file *file_priv);
3169int i915_gem_get_tiling(struct drm_device *dev, void *data,
3170 struct drm_file *file_priv);
72778cb2 3171void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
5cc9ed4b
CW
3172int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
3173 struct drm_file *file);
5a125c3c
EA
3174int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
3175 struct drm_file *file_priv);
23ba4fd0
BW
3176int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
3177 struct drm_file *file_priv);
d64aa096
ID
3178void i915_gem_load_init(struct drm_device *dev);
3179void i915_gem_load_cleanup(struct drm_device *dev);
40ae4e16 3180void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
6a800eab 3181int i915_gem_freeze(struct drm_i915_private *dev_priv);
461fb99c
CW
3182int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3183
42dcedd4
CW
3184void *i915_gem_object_alloc(struct drm_device *dev);
3185void i915_gem_object_free(struct drm_i915_gem_object *obj);
37e680a1
CW
3186void i915_gem_object_init(struct drm_i915_gem_object *obj,
3187 const struct drm_i915_gem_object_ops *ops);
d37cd8a8 3188struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
b4bcbe2a 3189 u64 size);
ea70299d
DG
3190struct drm_i915_gem_object *i915_gem_object_create_from_data(
3191 struct drm_device *dev, const void *data, size_t size);
b1f788c6 3192void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
673a394b 3193void i915_gem_free_object(struct drm_gem_object *obj);
42dcedd4 3194
058d88c4 3195struct i915_vma * __must_check
ec7adb6e
JL
3196i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3197 const struct i915_ggtt_view *view,
91b2db6f 3198 u64 size,
2ffffd0f
CW
3199 u64 alignment,
3200 u64 flags);
fe14d5f4
TU
3201
3202int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
3203 u32 flags);
d0710abb 3204void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
07fe0b12 3205int __must_check i915_vma_unbind(struct i915_vma *vma);
b1f788c6
CW
3206void i915_vma_close(struct i915_vma *vma);
3207void i915_vma_destroy(struct i915_vma *vma);
aa653a68
CW
3208
3209int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
05394f39 3210void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
f787a5f5 3211
7c108fd8
CW
3212void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
3213
a4f5ea64 3214static inline int __sg_page_count(const struct scatterlist *sg)
9da3da66 3215{
ee286370
CW
3216 return sg->length >> PAGE_SHIFT;
3217}
67d5a50c 3218
96d77634
CW
3219struct scatterlist *
3220i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
3221 unsigned int n, unsigned int *offset);
341be1cd 3222
96d77634
CW
3223struct page *
3224i915_gem_object_get_page(struct drm_i915_gem_object *obj,
3225 unsigned int n);
67d5a50c 3226
96d77634
CW
3227struct page *
3228i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
3229 unsigned int n);
67d5a50c 3230
96d77634
CW
3231dma_addr_t
3232i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
3233 unsigned long n);
ee286370 3234
03ac84f1
CW
3235void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
3236 struct sg_table *pages);
a4f5ea64
CW
3237int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
3238
3239static inline int __must_check
3240i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
3241{
1233e2db 3242 might_lock(&obj->mm.lock);
a4f5ea64 3243
1233e2db 3244 if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
a4f5ea64
CW
3245 return 0;
3246
3247 return __i915_gem_object_get_pages(obj);
3248}
3249
3250static inline void
3251__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
a5570178 3252{
a4f5ea64
CW
3253 GEM_BUG_ON(!obj->mm.pages);
3254
1233e2db 3255 atomic_inc(&obj->mm.pages_pin_count);
a4f5ea64
CW
3256}
3257
3258static inline bool
3259i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
3260{
1233e2db 3261 return atomic_read(&obj->mm.pages_pin_count);
a4f5ea64
CW
3262}
3263
3264static inline void
3265__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
3266{
a4f5ea64
CW
3267 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
3268 GEM_BUG_ON(!obj->mm.pages);
3269
1233e2db
CW
3270 atomic_dec(&obj->mm.pages_pin_count);
3271 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
a5570178 3272}
0a798eb9 3273
1233e2db
CW
3274static inline void
3275i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
a5570178 3276{
a4f5ea64 3277 __i915_gem_object_unpin_pages(obj);
a5570178
CW
3278}
3279
03ac84f1
CW
3280void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
3281void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
a4f5ea64 3282
d31d7cb1
CW
3283enum i915_map_type {
3284 I915_MAP_WB = 0,
3285 I915_MAP_WC,
3286};
3287
0a798eb9
CW
3288/**
3289 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
3290 * @obj: the object to map into kernel address space
d31d7cb1 3291 * @type: the type of mapping, used to select pgprot_t
0a798eb9
CW
3292 *
3293 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
3294 * pages and then returns a contiguous mapping of the backing storage into
d31d7cb1
CW
3295 * the kernel address space. Based on the @type of mapping, the PTE will be
3296 * set to either WriteBack or WriteCombine (via pgprot_t).
0a798eb9 3297 *
1233e2db
CW
3298 * The caller is responsible for calling i915_gem_object_unpin_map() when the
3299 * mapping is no longer required.
0a798eb9 3300 *
8305216f
DG
3301 * Returns the pointer through which to access the mapped object, or an
3302 * ERR_PTR() on error.
0a798eb9 3303 */
d31d7cb1
CW
3304void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
3305 enum i915_map_type type);
0a798eb9
CW
3306
3307/**
3308 * i915_gem_object_unpin_map - releases an earlier mapping
3309 * @obj: the object to unmap
3310 *
3311 * After pinning the object and mapping its pages, call
3312 * i915_gem_object_unpin_map() once you have finished with your access to
3313 * release the pin on the mapping. Once the pin count reaches zero, that
3314 * mapping may be removed.
0a798eb9
CW
3315 */
3316static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
3317{
0a798eb9
CW
3318 i915_gem_object_unpin_pages(obj);
3319}
3320
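/*
 * Illustrative sketch only (not part of the original header): the typical
 * pairing of i915_gem_object_pin_map() and i915_gem_object_unpin_map(),
 * assuming "obj" is a shmem-backed object owned by the caller.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */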
43394c7d
CW
3321int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
3322 unsigned int *needs_clflush);
3323int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
3324 unsigned int *needs_clflush);
3325#define CLFLUSH_BEFORE 0x1
3326#define CLFLUSH_AFTER 0x2
3327#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
3328
3329static inline void
3330i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
3331{
3332 i915_gem_object_unpin_pages(obj);
3333}
3334
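/*
 * Illustrative sketch only: CPU access to an object's backing pages via the
 * shmem helpers above. needs_clflush tells the caller which CLFLUSH_* steps
 * it must perform itself; the per-page copy loop is elided here.
 *
 *	unsigned int needs_clflush;
 *	int ret;
 *
 *	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 *	if (ret)
 *		return ret;
 *
 *	(read each page via i915_gem_object_get_page(), clflushing first
 *	 when needs_clflush & CLFLUSH_BEFORE is set)
 *
 *	i915_gem_obj_finish_shmem_access(obj);
 */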
54cf91dc 3335int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
e2d05a8b 3336void i915_vma_move_to_active(struct i915_vma *vma,
5cf3d280
CW
3337 struct drm_i915_gem_request *req,
3338 unsigned int flags);
ff72145b
DA
3339int i915_gem_dumb_create(struct drm_file *file_priv,
3340 struct drm_device *dev,
3341 struct drm_mode_create_dumb *args);
da6b51d0
DA
3342int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
3343 uint32_t handle, uint64_t *offset);
4cc69075 3344int i915_gem_mmap_gtt_version(void);
85d1225e
DG
3345
3346void i915_gem_track_fb(struct drm_i915_gem_object *old,
3347 struct drm_i915_gem_object *new,
3348 unsigned frontbuffer_bits);
3349
fca26bb4 3350int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
1690e1eb 3351
8d9fc7fd 3352struct drm_i915_gem_request *
0bc40be8 3353i915_gem_find_active_request(struct intel_engine_cs *engine);
8d9fc7fd 3354
67d97da3 3355void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
84c33a64 3356
1f83fee0
DV
3357static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
3358{
8af29b0c 3359 return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
c19ae989
CW
3360}
3361
8af29b0c 3362static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
c19ae989 3363{
8af29b0c 3364 return unlikely(test_bit(I915_WEDGED, &error->flags));
1f83fee0
DV
3365}
3366
8af29b0c 3367static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
1f83fee0 3368{
8af29b0c 3369 return i915_reset_in_progress(error) | i915_terminally_wedged(error);
2ac0f450
MK
3370}
3371
3372static inline u32 i915_reset_count(struct i915_gpu_error *error)
3373{
8af29b0c 3374 return READ_ONCE(error->reset_count);
1f83fee0 3375}
a71d8d94 3376
821ed7df
CW
3377void i915_gem_reset(struct drm_i915_private *dev_priv);
3378void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
000433b6 3379bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
1070a42b 3380int __must_check i915_gem_init(struct drm_device *dev);
f691e2f4
DV
3381int __must_check i915_gem_init_hw(struct drm_device *dev);
3382void i915_gem_init_swizzling(struct drm_device *dev);
117897f4 3383void i915_gem_cleanup_engines(struct drm_device *dev);
dcff85c8 3384int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
ea746f36 3385 unsigned int flags);
45c5f202 3386int __must_check i915_gem_suspend(struct drm_device *dev);
5ab57c70 3387void i915_gem_resume(struct drm_device *dev);
de151cf6 3388int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
e95433c7
CW
3389int i915_gem_object_wait(struct drm_i915_gem_object *obj,
3390 unsigned int flags,
3391 long timeout,
3392 struct intel_rps_client *rps);
2e2f351d 3393int __must_check
2021746e
CW
3394i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
3395 bool write);
3396int __must_check
dabdfe02 3397i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
058d88c4 3398struct i915_vma * __must_check
2da3b9b9
CW
3399i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3400 u32 alignment,
e6617330 3401 const struct i915_ggtt_view *view);
058d88c4 3402void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
00731155 3403int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
6eeefaf3 3404 int align);
b29c19b6 3405int i915_gem_open(struct drm_device *dev, struct drm_file *file);
05394f39 3406void i915_gem_release(struct drm_device *dev, struct drm_file *file);
673a394b 3407
a9f1481f
CW
3408u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
3409 int tiling_mode);
3410u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
ad1a7d20 3411 int tiling_mode, bool fenced);
467cffba 3412
e4ffd173
CW
3413int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3414 enum i915_cache_level cache_level);
3415
1286ff73
DV
3416struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
3417 struct dma_buf *dma_buf);
3418
3419struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
3420 struct drm_gem_object *gem_obj, int flags);
3421
fe14d5f4 3422struct i915_vma *
ec7adb6e 3423i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
058d88c4
CW
3424 struct i915_address_space *vm,
3425 const struct i915_ggtt_view *view);
fe14d5f4 3426
accfef2e
BW
3427struct i915_vma *
3428i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
058d88c4
CW
3429 struct i915_address_space *vm,
3430 const struct i915_ggtt_view *view);
5c2abbea 3431
841cd773
DV
3432static inline struct i915_hw_ppgtt *
3433i915_vm_to_ppgtt(struct i915_address_space *vm)
3434{
841cd773
DV
3435 return container_of(vm, struct i915_hw_ppgtt, base);
3436}
3437
058d88c4
CW
3438static inline struct i915_vma *
3439i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
3440 const struct i915_ggtt_view *view)
a70a3148 3441{
058d88c4 3442 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
a70a3148
BW
3443}
3444
058d88c4
CW
3445static inline unsigned long
3446i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
3447 const struct i915_ggtt_view *view)
e6617330 3448{
bde13ebd 3449 return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
e6617330 3450}
b287110e 3451
41a36b73 3452/* i915_gem_fence.c */
49ef5294
CW
3453int __must_check i915_vma_get_fence(struct i915_vma *vma);
3454int __must_check i915_vma_put_fence(struct i915_vma *vma);
3455
3456/**
3457 * i915_vma_pin_fence - pin fencing state
3458 * @vma: vma to pin fencing for
3459 *
3460 * This pins the fencing state (whether tiled or untiled) to make sure the
3461 * vma (and its object) is ready to be used as a scanout target. Fencing
 3462 * status must be synchronized first by calling i915_vma_get_fence().
3463 *
3464 * The resulting fence pin reference must be released again with
3465 * i915_vma_unpin_fence().
3466 *
3467 * Returns:
3468 *
3469 * True if the vma has a fence, false otherwise.
3470 */
3471static inline bool
3472i915_vma_pin_fence(struct i915_vma *vma)
3473{
4c7d62c6 3474 lockdep_assert_held(&vma->vm->dev->struct_mutex);
49ef5294
CW
3475 if (vma->fence) {
3476 vma->fence->pin_count++;
3477 return true;
 3478 }
 3479 return false;
3480}
41a36b73 3481
49ef5294
CW
3482/**
3483 * i915_vma_unpin_fence - unpin fencing state
3484 * @vma: vma to unpin fencing for
3485 *
3486 * This releases the fence pin reference acquired through
 3487 * i915_vma_pin_fence(). It handles both objects with and without an
 3488 * attached fence correctly; callers do not need to distinguish the two.
3489 */
3490static inline void
3491i915_vma_unpin_fence(struct i915_vma *vma)
3492{
4c7d62c6 3493 lockdep_assert_held(&vma->vm->dev->struct_mutex);
49ef5294
CW
3494 if (vma->fence) {
3495 GEM_BUG_ON(vma->fence->pin_count <= 0);
3496 vma->fence->pin_count--;
3497 }
3498}
41a36b73
DV
3499
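/*
 * Illustrative sketch only: preparing a vma for scanout with the fence
 * helpers above, with struct_mutex held by the caller.
 *
 *	int ret;
 *
 *	ret = i915_vma_get_fence(vma);
 *	if (ret)
 *		return ret;
 *
 *	if (i915_vma_pin_fence(vma)) {
 *		(program the display engine using the fenced GGTT mapping)
 *		i915_vma_unpin_fence(vma);
 *	}
 */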
3500void i915_gem_restore_fences(struct drm_device *dev);
3501
7f96ecaf 3502void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
03ac84f1
CW
3503void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
3504 struct sg_table *pages);
3505void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
3506 struct sg_table *pages);
7f96ecaf 3507
254f965c 3508/* i915_gem_context.c */
8245be31 3509int __must_check i915_gem_context_init(struct drm_device *dev);
b2e862d0 3510void i915_gem_context_lost(struct drm_i915_private *dev_priv);
254f965c 3511void i915_gem_context_fini(struct drm_device *dev);
e422b888 3512int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
254f965c 3513void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
ba01cc93 3514int i915_switch_context(struct drm_i915_gem_request *req);
945657b4 3515int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
dce3271b 3516void i915_gem_context_free(struct kref *ctx_ref);
8c857917
OM
3517struct drm_i915_gem_object *
3518i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
c8c35799
ZW
3519struct i915_gem_context *
3520i915_gem_context_create_gvt(struct drm_device *dev);
ca585b5d
CW
3521
3522static inline struct i915_gem_context *
3523i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
3524{
3525 struct i915_gem_context *ctx;
3526
091387c1 3527 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
ca585b5d
CW
3528
3529 ctx = idr_find(&file_priv->context_idr, id);
3530 if (!ctx)
3531 return ERR_PTR(-ENOENT);
3532
3533 return ctx;
3534}
3535
9a6feaf0
CW
3536static inline struct i915_gem_context *
3537i915_gem_context_get(struct i915_gem_context *ctx)
dce3271b 3538{
691e6415 3539 kref_get(&ctx->ref);
9a6feaf0 3540 return ctx;
dce3271b
MK
3541}
3542
9a6feaf0 3543static inline void i915_gem_context_put(struct i915_gem_context *ctx)
dce3271b 3544{
091387c1 3545 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
691e6415 3546 kref_put(&ctx->ref, i915_gem_context_free);
dce3271b
MK
3547}
3548
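/*
 * Illustrative sketch only: resolving a user context handle and holding a
 * temporary reference, with struct_mutex held. "ctx_id" stands in for the
 * user-supplied handle and is not a name used elsewhere in this header.
 *
 *	struct i915_gem_context *ctx;
 *
 *	ctx = i915_gem_context_lookup(file_priv, ctx_id);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	ctx = i915_gem_context_get(ctx);
 *	(use ctx)
 *	i915_gem_context_put(ctx);
 */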
e2efd130 3549static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
3fac8978 3550{
821d66dd 3551 return c->user_handle == DEFAULT_CONTEXT_HANDLE;
3fac8978
MK
3552}
3553
84624813
BW
3554int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
3555 struct drm_file *file);
3556int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
3557 struct drm_file *file);
c9dc0f35
CW
3558int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
3559 struct drm_file *file_priv);
3560int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
3561 struct drm_file *file_priv);
d538704b
CW
3562int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
3563 struct drm_file *file);
1286ff73 3564
679845ed 3565/* i915_gem_evict.c */
e522ac23 3566int __must_check i915_gem_evict_something(struct i915_address_space *vm,
2ffffd0f 3567 u64 min_size, u64 alignment,
679845ed 3568 unsigned cache_level,
2ffffd0f 3569 u64 start, u64 end,
1ec9e26d 3570 unsigned flags);
506a8e87 3571int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
679845ed 3572int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
1d2a314c 3573
0260c420 3574/* belongs in i915_gem_gtt.h */
c033666a 3575static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
e76e9aeb 3576{
600f4368 3577 wmb();
c033666a 3578 if (INTEL_GEN(dev_priv) < 6)
e76e9aeb
BW
3579 intel_gtt_chipset_flush();
3580}
246cbfb5 3581
9797fbfb 3582/* i915_gem_stolen.c */
d713fd49
PZ
3583int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
3584 struct drm_mm_node *node, u64 size,
3585 unsigned alignment);
a9da512b
PZ
3586int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
3587 struct drm_mm_node *node, u64 size,
3588 unsigned alignment, u64 start,
3589 u64 end);
d713fd49
PZ
3590void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
3591 struct drm_mm_node *node);
9797fbfb
CW
3592int i915_gem_init_stolen(struct drm_device *dev);
3593void i915_gem_cleanup_stolen(struct drm_device *dev);
0104fdbb
CW
3594struct drm_i915_gem_object *
3595i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
866d12b4
CW
3596struct drm_i915_gem_object *
3597i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
3598 u32 stolen_offset,
3599 u32 gtt_offset,
3600 u32 size);
9797fbfb 3601
920cf419
CW
3602/* i915_gem_internal.c */
3603struct drm_i915_gem_object *
3604i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
3605 unsigned int size);
3606
be6a0376
DV
3607/* i915_gem_shrinker.c */
3608unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
14387540 3609 unsigned long target,
be6a0376
DV
3610 unsigned flags);
3611#define I915_SHRINK_PURGEABLE 0x1
3612#define I915_SHRINK_UNBOUND 0x2
3613#define I915_SHRINK_BOUND 0x4
5763ff04 3614#define I915_SHRINK_ACTIVE 0x8
eae2c43b 3615#define I915_SHRINK_VMAPS 0x10
be6a0376
DV
3616unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
3617void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
a8a40589 3618void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
be6a0376
DV
3619
3620
673a394b 3621/* i915_gem_tiling.c */
2c1792a1 3622static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
e9b73c67 3623{
091387c1 3624 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
e9b73c67
CW
3625
3626 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
3e510a8e 3627 i915_gem_object_is_tiled(obj);
e9b73c67
CW
3628}
3629
2017263e 3630/* i915_debugfs.c */
f8c168fa 3631#ifdef CONFIG_DEBUG_FS
1dac891c
CW
3632int i915_debugfs_register(struct drm_i915_private *dev_priv);
3633void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
249e87de 3634int i915_debugfs_connector_add(struct drm_connector *connector);
36cdd013 3635void intel_display_crc_init(struct drm_i915_private *dev_priv);
07144428 3636#else
8d35acba
CW
3637static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
3638static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
101057fa
DV
3639static inline int i915_debugfs_connector_add(struct drm_connector *connector)
3640{ return 0; }
ce5e2ac1 3641static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
07144428 3642#endif
84734a04
MK
3643
3644/* i915_gpu_error.c */
98a2f411
CW
3645#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
3646
edc3d884
MK
3647__printf(2, 3)
3648void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
fc16b48b
MK
3649int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
3650 const struct i915_error_state_file_priv *error);
4dc955f7 3651int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
0a4cd7c8 3652 struct drm_i915_private *i915,
4dc955f7
MK
3653 size_t count, loff_t pos);
3654static inline void i915_error_state_buf_release(
3655 struct drm_i915_error_state_buf *eb)
3656{
3657 kfree(eb->buf);
3658}
c033666a
CW
3659void i915_capture_error_state(struct drm_i915_private *dev_priv,
3660 u32 engine_mask,
58174462 3661 const char *error_msg);
84734a04
MK
3662void i915_error_state_get(struct drm_device *dev,
3663 struct i915_error_state_file_priv *error_priv);
3664void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
3665void i915_destroy_error_state(struct drm_device *dev);
3666
98a2f411
CW
3667#else
3668
3669static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
3670 u32 engine_mask,
3671 const char *error_msg)
3672{
3673}
3674
3675static inline void i915_destroy_error_state(struct drm_device *dev)
3676{
3677}
3678
3679#endif
3680
0a4cd7c8 3681const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
2017263e 3682
351e3db2 3683/* i915_cmd_parser.c */
1ca3712c 3684int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
7756e454 3685void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
33a051a5
CW
3686void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
3687bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
3688int intel_engine_cmd_parser(struct intel_engine_cs *engine,
3689 struct drm_i915_gem_object *batch_obj,
3690 struct drm_i915_gem_object *shadow_batch_obj,
3691 u32 batch_start_offset,
3692 u32 batch_len,
3693 bool is_master);
351e3db2 3694
317c35d1
JB
3695/* i915_suspend.c */
3696extern int i915_save_state(struct drm_device *dev);
3697extern int i915_restore_state(struct drm_device *dev);
0a3e67a4 3698
0136db58 3699/* i915_sysfs.c */
694c2828
DW
3700void i915_setup_sysfs(struct drm_i915_private *dev_priv);
3701void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
0136db58 3702
f899fc64
CW
3703/* intel_i2c.c */
3704extern int intel_setup_gmbus(struct drm_device *dev);
3705extern void intel_teardown_gmbus(struct drm_device *dev);
88ac7939
JN
3706extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
3707 unsigned int pin);
3bd7d909 3708
0184df46
JN
3709extern struct i2c_adapter *
3710intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
e957d772
CW
3711extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
3712extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
8f375e10 3713static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
b8232e90
CW
3714{
3715 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
3716}
f899fc64
CW
3717extern void intel_i2c_reset(struct drm_device *dev);
3718
8b8e1a89 3719/* intel_bios.c */
98f3a1dc 3720int intel_bios_init(struct drm_i915_private *dev_priv);
f0067a31 3721bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3bdd14d5 3722bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
5a69d13d 3723bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
22f35042 3724bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
951d9efe 3725bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
d6199256 3726bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
7137aec1 3727bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
d252bf68
SS
3728bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3729 enum port port);
6389dd83
SS
3730bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
3731 enum port port);
3732
8b8e1a89 3733
3b617967 3734/* intel_opregion.c */
44834a67 3735#ifdef CONFIG_ACPI
6f9f4b7a 3736extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
03d92e47
CW
3737extern void intel_opregion_register(struct drm_i915_private *dev_priv);
3738extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
91d14251 3739extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
9c4b0a68
JN
3740extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
3741 bool enable);
6f9f4b7a 3742extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
ecbc5cf3 3743 pci_power_t state);
6f9f4b7a 3744extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
65e082c9 3745#else
6f9f4b7a 3746static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
bdaa2dfb
RD
3747static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
3748static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
91d14251
TU
3749static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
3750{
3751}
9c4b0a68
JN
3752static inline int
3753intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
3754{
3755 return 0;
3756}
ecbc5cf3 3757static inline int
6f9f4b7a 3758intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
ecbc5cf3
JN
3759{
3760 return 0;
3761}
6f9f4b7a 3762static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
a0562819
VS
3763{
3764 return -ENODEV;
3765}
65e082c9 3766#endif
8ee1c3db 3767
723bfd70
JB
3768/* intel_acpi.c */
3769#ifdef CONFIG_ACPI
3770extern void intel_register_dsm_handler(void);
3771extern void intel_unregister_dsm_handler(void);
3772#else
3773static inline void intel_register_dsm_handler(void) { return; }
3774static inline void intel_unregister_dsm_handler(void) { return; }
3775#endif /* CONFIG_ACPI */
3776
94b4f3ba
CW
3777/* intel_device_info.c */
3778static inline struct intel_device_info *
3779mkwrite_device_info(struct drm_i915_private *dev_priv)
3780{
3781 return (struct intel_device_info *)&dev_priv->info;
3782}
3783
3784void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
3785void intel_device_info_dump(struct drm_i915_private *dev_priv);
3786
79e53945 3787/* modesetting */
f817586c 3788extern void intel_modeset_init_hw(struct drm_device *dev);
79e53945 3789extern void intel_modeset_init(struct drm_device *dev);
2c7111db 3790extern void intel_modeset_gem_init(struct drm_device *dev);
79e53945 3791extern void intel_modeset_cleanup(struct drm_device *dev);
1ebaa0b9 3792extern int intel_connector_register(struct drm_connector *);
c191eca1 3793extern void intel_connector_unregister(struct drm_connector *);
28d52043 3794extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
043e9bda 3795extern void intel_display_resume(struct drm_device *dev);
44cec740 3796extern void i915_redisable_vga(struct drm_device *dev);
04098753 3797extern void i915_redisable_vga_power_on(struct drm_device *dev);
91d14251 3798extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
dde86e2d 3799extern void intel_init_pch_refclk(struct drm_device *dev);
dc97997a 3800extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
5209b1f4
ID
3801extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3802 bool enable);
3bad0781 3803
c0c7babc
BW
3804int i915_reg_read_ioctl(struct drm_device *dev, void *data,
3805 struct drm_file *file);
575155a9 3806
6ef3d427 3807/* overlay */
c033666a
CW
3808extern struct intel_overlay_error_state *
3809intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
edc3d884
MK
3810extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
3811 struct intel_overlay_error_state *error);
c4a1d9e4 3812
c033666a
CW
3813extern struct intel_display_error_state *
3814intel_display_capture_error_state(struct drm_i915_private *dev_priv);
edc3d884 3815extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
c4a1d9e4
CW
3816 struct drm_device *dev,
3817 struct intel_display_error_state *error);
6ef3d427 3818
151a49d0
TR
3819int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
3820int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
59de0813
JN
3821
3822/* intel_sideband.c */
707b6e3d
D
3823u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
3824void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
64936258 3825u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
dfb19ed2
D
3826u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
3827void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
e9f882a3
JN
3828u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
3829void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3830u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
3831void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
f3419158
JB
3832u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
3833void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
5e69f97f
CML
3834u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
3835void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
59de0813
JN
3836u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
3837 enum intel_sbi_destination destination);
3838void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
3839 enum intel_sbi_destination destination);
e9fe51c6
SK
3840u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
3841void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
0a073b84 3842
b7fa22d8 3843/* intel_dpio_phy.c */
ed37892e
ACO
3844void bxt_port_to_phy_channel(enum port port,
3845 enum dpio_phy *phy, enum dpio_channel *ch);
b6e08203
ACO
3846void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
3847 enum port port, u32 margin, u32 scale,
3848 u32 enable, u32 deemphasis);
47a6bc61
ACO
3849void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
3850void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
3851bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
3852 enum dpio_phy phy);
3853bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
3854 enum dpio_phy phy);
3855uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
3856 uint8_t lane_count);
3857void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
3858 uint8_t lane_lat_optim_mask);
3859uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
3860
b7fa22d8
ACO
3861void chv_set_phy_signal_level(struct intel_encoder *encoder,
3862 u32 deemph_reg_value, u32 margin_reg_value,
3863 bool uniq_trans_scale);
844b2f9a
ACO
3864void chv_data_lane_soft_reset(struct intel_encoder *encoder,
3865 bool reset);
419b1b7a 3866void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
e7d2a717
ACO
3867void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
3868void chv_phy_release_cl2_override(struct intel_encoder *encoder);
204970b5 3869void chv_phy_post_pll_disable(struct intel_encoder *encoder);
b7fa22d8 3870
53d98725
ACO
3871void vlv_set_phy_signal_level(struct intel_encoder *encoder,
3872 u32 demph_reg_value, u32 preemph_reg_value,
3873 u32 uniqtranscale_reg_value, u32 tx3_demph);
6da2e616 3874void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
5f68c275 3875void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
0f572ebe 3876void vlv_phy_reset_lanes(struct intel_encoder *encoder);
53d98725 3877
616bc820
VS
3878int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
3879int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
c8d9a590 3880
0b274481
BW
3881#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
3882#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
3883
3884#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
3885#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
3886#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
3887#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
3888
3889#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
3890#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
3891#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
3892#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
3893
698b3135
CW
3894/* Be very careful with read/write 64-bit values. On 32-bit machines, they
3895 * will be implemented using 2 32-bit writes in an arbitrary order with
3896 * an arbitrary delay between them. This can cause the hardware to
3897 * act upon the intermediate value, possibly leading to corruption and
b18c1bb4
CW
3898 * machine death. For this reason we do not support I915_WRITE64, or
3899 * dev_priv->uncore.funcs.mmio_writeq.
3900 *
3901 * When reading a 64-bit value as two 32-bit values, the delay may cause
3902 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 3903 * occasionally a 64-bit register does not actually support a full readq
3904 * and must be read using two 32-bit reads.
3905 *
3906 * You have been warned.
698b3135 3907 */
0b274481 3908#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
cae5852d 3909
50877445 3910#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
acd29f7b
CW
3911 u32 upper, lower, old_upper, loop = 0; \
3912 upper = I915_READ(upper_reg); \
ee0a227b 3913 do { \
acd29f7b 3914 old_upper = upper; \
ee0a227b 3915 lower = I915_READ(lower_reg); \
acd29f7b
CW
3916 upper = I915_READ(upper_reg); \
3917 } while (upper != old_upper && loop++ < 2); \
ee0a227b 3918 (u64)upper << 32 | lower; })
50877445 3919
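/*
 * Illustrative sketch only: reading a 64-bit counter exposed as a pair of
 * 32-bit registers. EXAMPLE_CTR_LOW/EXAMPLE_CTR_HIGH are hypothetical
 * i915_reg_t names, not registers defined in i915_reg.h.
 *
 *	u64 val = I915_READ64_2x32(EXAMPLE_CTR_LOW, EXAMPLE_CTR_HIGH);
 */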
cae5852d
ZN
3920#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
3921#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
3922
75aa3f63
VS
3923#define __raw_read(x, s) \
3924static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
f0f59a00 3925 i915_reg_t reg) \
75aa3f63 3926{ \
f0f59a00 3927 return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
75aa3f63
VS
3928}
3929
3930#define __raw_write(x, s) \
3931static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
f0f59a00 3932 i915_reg_t reg, uint##x##_t val) \
75aa3f63 3933{ \
f0f59a00 3934 write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
75aa3f63
VS
3935}
3936__raw_read(8, b)
3937__raw_read(16, w)
3938__raw_read(32, l)
3939__raw_read(64, q)
3940
3941__raw_write(8, b)
3942__raw_write(16, w)
3943__raw_write(32, l)
3944__raw_write(64, q)
3945
3946#undef __raw_read
3947#undef __raw_write
3948
a6111f7b 3949/* These are untraced mmio-accessors that are only valid to be used inside
aafee2eb 3950 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
a6111f7b 3951 * controlled.
aafee2eb 3952 *
a6111f7b 3953 * Think twice, and think again, before using these.
aafee2eb
AH
3954 *
3955 * As an example, these accessors can possibly be used between:
3956 *
3957 * spin_lock_irq(&dev_priv->uncore.lock);
3958 * intel_uncore_forcewake_get__locked();
3959 *
3960 * and
3961 *
3962 * intel_uncore_forcewake_put__locked();
3963 * spin_unlock_irq(&dev_priv->uncore.lock);
3964 *
3965 *
3966 * Note: some registers may not need forcewake held, so
 3967 * intel_uncore_forcewake_{get,put} can be omitted; see
3968 * intel_uncore_forcewake_for_reg().
3969 *
3970 * Certain architectures will die if the same cacheline is concurrently accessed
3971 * by different clients (e.g. on Ivybridge). Access to registers should
3972 * therefore generally be serialised, by either the dev_priv->uncore.lock or
3973 * a more localised lock guarding all access to that bank of registers.
a6111f7b 3974 */
75aa3f63
VS
3975#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
3976#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
76f8421f 3977#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
a6111f7b
CW
3978#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
3979
55bc60db
VS
3980/* "Broadcast RGB" property */
3981#define INTEL_BROADCAST_RGB_AUTO 0
3982#define INTEL_BROADCAST_RGB_FULL 1
3983#define INTEL_BROADCAST_RGB_LIMITED 2
ba4f01a3 3984
920a14b2 3985static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
766aa1c4 3986{
920a14b2 3987 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
766aa1c4 3988 return VLV_VGACNTRL;
920a14b2 3989 else if (INTEL_GEN(dev_priv) >= 5)
92e23b99 3990 return CPU_VGACNTRL;
766aa1c4
VS
3991 else
3992 return VGACNTRL;
3993}
3994
df97729f
ID
3995static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
3996{
3997 unsigned long j = msecs_to_jiffies(m);
3998
3999 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
4000}
4001
7bd0e226
DV
4002static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
4003{
4004 return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
4005}
4006
df97729f
ID
4007static inline unsigned long
4008timespec_to_jiffies_timeout(const struct timespec *value)
4009{
4010 unsigned long j = timespec_to_jiffies(value);
4011
4012 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
4013}
4014
dce56b3c
PZ
4015/*
 4016 * If you need to wait X milliseconds between events A and B, but event B
 4017 * doesn't happen exactly after event A, record the timestamp (in jiffies)
 4018 * of when event A happened. Then, just before event B, call this function
 4019 * with that timestamp as the first argument and X as the second.
4020 */
4021static inline void
4022wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
4023{
ec5e0cfb 4024 unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
dce56b3c
PZ
4025
4026 /*
4027 * Don't re-read the value of "jiffies" every time since it may change
4028 * behind our back and break the math.
4029 */
4030 tmp_jiffies = jiffies;
4031 target_jiffies = timestamp_jiffies +
4032 msecs_to_jiffies_timeout(to_wait_ms);
4033
4034 if (time_after(target_jiffies, tmp_jiffies)) {
ec5e0cfb
ID
4035 remaining_jiffies = target_jiffies - tmp_jiffies;
4036 while (remaining_jiffies)
4037 remaining_jiffies =
4038 schedule_timeout_uninterruptible(remaining_jiffies);
dce56b3c
PZ
4039 }
4040}
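/*
 * Illustrative sketch only: enforcing a minimum gap between two events.
 * "panel_off_jiffies" is a hypothetical timestamp recorded at event A.
 *
 *	panel_off_jiffies = jiffies;			(event A)
 *	(unrelated work of unknown duration)
 *	wait_remaining_ms_from_jiffies(panel_off_jiffies, 10);
 *	(event B now happens at least 10 ms after event A)
 */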
221fe799
CW
4041
4042static inline bool
4043__i915_request_irq_complete(struct drm_i915_gem_request *req)
688e6c72 4044{
f69a02c9
CW
4045 struct intel_engine_cs *engine = req->engine;
4046
7ec2c73b
CW
4047 /* Before we do the heavier coherent read of the seqno,
4048 * check the value (hopefully) in the CPU cacheline.
4049 */
4050 if (i915_gem_request_completed(req))
4051 return true;
4052
688e6c72
CW
4053 /* Ensure our read of the seqno is coherent so that we
4054 * do not "miss an interrupt" (i.e. if this is the last
4055 * request and the seqno write from the GPU is not visible
4056 * by the time the interrupt fires, we will see that the
4057 * request is incomplete and go back to sleep awaiting
4058 * another interrupt that will never come.)
4059 *
4060 * Strictly, we only need to do this once after an interrupt,
4061 * but it is easier and safer to do it every time the waiter
4062 * is woken.
4063 */
3d5564e9 4064 if (engine->irq_seqno_barrier &&
dbd6ef29 4065 rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
aca34b6e 4066 cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
99fe4a5f
CW
4067 struct task_struct *tsk;
4068
3d5564e9
CW
4069 /* The ordering of irq_posted versus applying the barrier
4070 * is crucial. The clearing of the current irq_posted must
4071 * be visible before we perform the barrier operation,
4072 * such that if a subsequent interrupt arrives, irq_posted
4073 * is reasserted and our task rewoken (which causes us to
4074 * do another __i915_request_irq_complete() immediately
4075 * and reapply the barrier). Conversely, if the clear
4076 * occurs after the barrier, then an interrupt that arrived
4077 * whilst we waited on the barrier would not trigger a
4078 * barrier on the next pass, and the read may not see the
4079 * seqno update.
4080 */
f69a02c9 4081 engine->irq_seqno_barrier(engine);
99fe4a5f
CW
4082
4083 /* If we consume the irq, but we are no longer the bottom-half,
 4084 * the real bottom-half may not have serialised its own
4085 * seqno check with the irq-barrier (i.e. may have inspected
4086 * the seqno before we believe it coherent since they see
4087 * irq_posted == false but we are still running).
4088 */
4089 rcu_read_lock();
dbd6ef29 4090 tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
99fe4a5f
CW
4091 if (tsk && tsk != current)
4092 /* Note that if the bottom-half is changed as we
4093 * are sending the wake-up, the new bottom-half will
 4094 * be woken by whoever made the change. We only have
 4095 * to worry about when we steal the irq-posted for
 4096 * ourselves.
4097 */
4098 wake_up_process(tsk);
4099 rcu_read_unlock();
4100
7ec2c73b
CW
4101 if (i915_gem_request_completed(req))
4102 return true;
4103 }
688e6c72 4104
688e6c72
CW
4105 return false;
4106}
4107
0b1de5d5
CW
4108void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
4109bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
4110
c58305af
CW
4111/* i915_mm.c */
4112int remap_io_mapping(struct vm_area_struct *vma,
4113 unsigned long addr, unsigned long pfn, unsigned long size,
4114 struct io_mapping *iomap);
4115
4b30cb23
CW
4116#define ptr_mask_bits(ptr) ({ \
4117 unsigned long __v = (unsigned long)(ptr); \
4118 (typeof(ptr))(__v & PAGE_MASK); \
4119})
4120
d31d7cb1
CW
4121#define ptr_unpack_bits(ptr, bits) ({ \
4122 unsigned long __v = (unsigned long)(ptr); \
4123 (bits) = __v & ~PAGE_MASK; \
4124 (typeof(ptr))(__v & PAGE_MASK); \
4125})
4126
4127#define ptr_pack_bits(ptr, bits) \
4128 ((typeof(ptr))((unsigned long)(ptr) | (bits)))
4129
78ef2d9a
CW
4130#define fetch_and_zero(ptr) ({ \
4131 typeof(*ptr) __T = *(ptr); \
4132 *(ptr) = (typeof(*ptr))0; \
4133 __T; \
4134})
4135
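/*
 * Illustrative sketch only: stashing flag bits in the low bits of a
 * page-aligned pointer and recovering both, plus taking ownership of a
 * pointer field with fetch_and_zero() (a plain read-then-clear, not an
 * atomic operation). "page_ptr" is a hypothetical page-aligned pointer.
 *
 *	unsigned long flags;
 *	void *packed = ptr_pack_bits(page_ptr, 0x2);
 *	void *ptr = ptr_unpack_bits(packed, flags);	(flags is now 0x2)
 *
 *	struct sg_table *pages = fetch_and_zero(&obj->mm.pages);
 */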
1da177e4 4136#endif