/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
        struct drm_i915_gem_object *obj;
        volatile u32 *cpu_page;
        u32 gtt_offset;
};

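/*
 * Free space in the ring is the distance from tail back round to head,
 * less 8 bytes of slack so that the tail never quite catches up with
 * the head (head == tail reads as an empty ring). A worked example:
 * with size 4096, head 256 and tail 1024, space = 256 - 1032 + 4096 =
 * 3320 bytes.
 */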
static inline int ring_space(struct intel_ring_buffer *ring)
{
        int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
        if (space < 0)
                space += ring->size;
        return space;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
                  u32 invalidate_domains,
                  u32 flush_domains)
{
        struct drm_device *dev = ring->dev;
        u32 cmd;
        int ret;

        /*
         * read/write caches:
         *
         * I915_GEM_DOMAIN_RENDER is always invalidated, but is
         * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
         * also flushed at 2d versus 3d pipeline switches.
         *
         * read-only caches:
         *
         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
         * MI_READ_FLUSH is set, and is always flushed on 965.
         *
         * I915_GEM_DOMAIN_COMMAND may not exist?
         *
         * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
         * invalidated when MI_EXE_FLUSH is set.
         *
         * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
         * invalidated with every MI_FLUSH.
         *
         * TLBs:
         *
         * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
         * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
         * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
         * are flushed at any MI_FLUSH.
         */

        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;
        if (INTEL_INFO(dev)->gen < 4) {
                /*
                 * On the 965, the sampler cache always gets flushed
                 * and this bit is reserved.
                 */
                if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                        cmd |= MI_READ_FLUSH;
        }
        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
            (IS_G4X(dev) || IS_GEN5(dev)))
                cmd |= MI_INVALIDATE_ISP;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
        intel_ring_emit(ring, 0); /* low dword */
        intel_ring_emit(ring, 0); /* high dword */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

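/*
 * Note that both PIPE_CONTROLs above, and the QW_WRITE emitted by
 * gen6_render_ring_flush() below, target the scratch address 128 bytes
 * into the pipe_control page, which keeps the dummy writes clear of
 * dword 0 -- the dword that pc_render_get_seqno() reads as the seqno
 * on Ironlake.
 */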
static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
        u32 flags = 0;
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;

        /* Force SNB workarounds for PIPE_CONTROL flushes */
        ret = intel_emit_post_sync_nonzero_flush(ring);
        if (ret)
                return ret;

        /* Just flush everything.  Experiments have shown that reducing the
         * number of bits based on the write domains has little performance
         * impact.
         */
        flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
        flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
        flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, 0); /* lower dword */
        intel_ring_emit(ring, 0); /* upper dword */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
                            u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
                        RING_ACTHD(ring->mmio_base) : ACTHD;

        return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_object *obj = ring->obj;
        u32 head;

        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);

        /* Initialize the ring. */
        I915_WRITE_START(ring, obj->gtt_offset);
        head = I915_READ_HEAD(ring) & HEAD_ADDR;

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              ring->name,
                              I915_READ_CTL(ring),
                              I915_READ_HEAD(ring),
                              I915_READ_TAIL(ring),
                              I915_READ_START(ring));

                I915_WRITE_HEAD(ring, 0);

                if (I915_READ_HEAD(ring) & HEAD_ADDR) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  ring->name,
                                  I915_READ_CTL(ring),
                                  I915_READ_HEAD(ring),
                                  I915_READ_TAIL(ring),
                                  I915_READ_START(ring));
                }
        }

        I915_WRITE_CTL(ring,
                       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                       | RING_VALID);

        /* If the head is still not zero, the ring is dead */
        if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
                     I915_READ_START(ring) == obj->gtt_offset &&
                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          ring->name,
                          I915_READ_CTL(ring),
                          I915_READ_HEAD(ring),
                          I915_READ_TAIL(ring),
                          I915_READ_START(ring));
                return -EIO;
        }

        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
                i915_kernel_lost_context(ring->dev);
        else {
                ring->head = I915_READ_HEAD(ring);
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring_space(ring);
        }

        return 0;
}

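/*
 * The pipe_control object is a single page pinned into the global GTT.
 * Dword 0 holds the seqno written by pc_render_add_request(); the rest
 * of the page serves as scratch space for the workaround flushes.
 */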
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc;
        struct drm_i915_gem_object *obj;
        int ret;

        if (ring->private)
                return 0;

        pc = kmalloc(sizeof(*pc), GFP_KERNEL);
        if (!pc)
                return -ENOMEM;

        obj = i915_gem_alloc_object(ring->dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate seqno page\n");
                ret = -ENOMEM;
                goto err;
        }

        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret)
                goto err_unref;

        pc->gtt_offset = obj->gtt_offset;
        pc->cpu_page = kmap(obj->pages[0]);
        if (pc->cpu_page == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }

        pc->obj = obj;
        ring->private = pc;
        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
err:
        kfree(pc);
        return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        struct drm_i915_gem_object *obj;

        if (!ring->private)
                return;

        obj = pc->obj;
        kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);

        kfree(pc);
        ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = init_ring_common(ring);

        if (INTEL_INFO(dev)->gen > 3) {
                int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
                I915_WRITE(MI_MODE, mode);
                if (IS_GEN7(dev))
                        I915_WRITE(GFX_MODE_GEN7,
                                   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                                   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
        }

        if (INTEL_INFO(dev)->gen >= 5) {
                ret = init_pipe_control(ring);
                if (ret)
                        return ret;
        }

        if (INTEL_INFO(dev)->gen >= 6) {
                I915_WRITE(INSTPM,
                           INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
        }

        return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
        if (!ring->private)
                return;

        cleanup_pipe_control(ring);
}

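/*
 * A semaphore mailbox update is three dwords: the MI_SEMAPHORE_MBOX
 * command, the new seqno, and the mmio offset of the mailbox register
 * belonging to the ring that will later wait on that seqno.
 */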
static void
update_mboxes(struct intel_ring_buffer *ring,
              u32 seqno,
              u32 mmio_offset)
{
        intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
                        MI_SEMAPHORE_GLOBAL_GTT |
                        MI_SEMAPHORE_REGISTER |
                        MI_SEMAPHORE_UPDATE);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - pointer filled with the seqno written into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
                 u32 *seqno)
{
        u32 mbox1_reg;
        u32 mbox2_reg;
        int ret;

        ret = intel_ring_begin(ring, 10);
        if (ret)
                return ret;

        mbox1_reg = ring->signal_mbox[0];
        mbox2_reg = ring->signal_mbox[1];

        *seqno = i915_gem_next_request_seqno(ring);

        update_mboxes(ring, *seqno, mbox1_reg);
        update_mboxes(ring, *seqno, mbox2_reg);
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, *seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        return 0;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has signalled, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
                struct intel_ring_buffer *signaller,
                int ring,
                u32 seqno)
{
        int ret;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;

        /* Throughout all of the GEM code, seqno passed implies our current
         * seqno is >= the last seqno executed. However for hardware the
         * comparison is strictly greater than.
         */
        seqno -= 1;

        ret = intel_ring_begin(waiter, 4);
        if (ret)
                return ret;

        intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
        intel_ring_emit(waiter, seqno);
        intel_ring_emit(waiter, 0);
        intel_ring_emit(waiter, MI_NOOP);
        intel_ring_advance(waiter);

        return 0;
}

/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
                    struct intel_ring_buffer *signaller,
                    u32 seqno)
{
        WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
        return intel_ring_sync(waiter,
                               signaller,
                               RCS,
                               seqno);
}

/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
                      struct intel_ring_buffer *signaller,
                      u32 seqno)
{
        WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
        return intel_ring_sync(waiter,
                               signaller,
                               VCS,
                               seqno);
}

/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
                      struct intel_ring_buffer *signaller,
                      u32 seqno)
{
        WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
        return intel_ring_sync(waiter,
                               signaller,
                               BCS,
                               seqno);
}

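/*
 * Emits a depth-stalling qword write of scratch data through
 * PIPE_CONTROL. pc_render_add_request() chains six of these, each
 * aimed at a separate cacheline, to scrub the PIPE_NOTIFY write
 * buffers before the final interrupt-raising PIPE_CONTROL (see the
 * comment in that function).
 */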
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
        intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
                        PIPE_CONTROL_DEPTH_STALL); \
        intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
        intel_ring_emit(ring__, 0); \
        intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
                      u32 *result)
{
        u32 seqno = i915_gem_next_request_seqno(ring);
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;

        /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
         * incoherent with writes to memory, i.e. completely fubar,
         * so we need to use PIPE_NOTIFY instead.
         *
         * However, we also need to workaround the qword write
         * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
         * memory before requesting an interrupt.
         */
        ret = intel_ring_begin(ring, 32);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128; /* write to separate cachelines */
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
                        u32 *result)
{
        u32 seqno = i915_gem_next_request_seqno(ring);
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;

        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
         * ACTHD) before reading the status page. */
        if (IS_GEN6(dev) || IS_GEN7(dev))
                intel_ring_get_active_head(ring);
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->gt_irq_mask &= ~mask;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->gt_irq_mask |= mask;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->irq_mask &= ~mask;
        I915_WRITE(IMR, dev_priv->irq_mask);
        POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->irq_mask |= mask;
        I915_WRITE(IMR, dev_priv->irq_mask);
        POSTING_READ(IMR);
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev->irq_enabled)
                return false;

        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                if (INTEL_INFO(dev)->gen >= 5)
                        ironlake_enable_irq(dev_priv,
                                            GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock(&ring->irq_lock);

        return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
                if (INTEL_INFO(dev)->gen >= 5)
                        ironlake_disable_irq(dev_priv,
                                             GT_USER_INTERRUPT |
                                             GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock(&ring->irq_lock);
}

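/*
 * The hardware status page is a page of GPU-writable status dwords
 * (seqnos land there via MI_STORE_DWORD_INDEX); the HWS_PGA register
 * written here tells the ring where that page lives.
 */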
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 mmio = 0;

        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
        if (IS_GEN7(dev)) {
                switch (ring->id) {
                case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
                case BCS:
                        mmio = BLT_HWS_PGA_GEN7;
                        break;
                case VCS:
                        mmio = BSD_HWS_PGA_GEN7;
                        break;
                }
        } else if (IS_GEN6(ring->dev)) {
                mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
        } else {
                mmio = RING_HWS_PGA(ring->mmio_base);
        }

        I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
        POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
               u32 invalidate_domains,
               u32 flush_domains)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_FLUSH);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
{
        u32 seqno;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        seqno = i915_gem_next_request_seqno(ring);

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev->irq_enabled)
                return false;

        /* It looks like we need to prevent the gt from suspending while waiting
         * for a notify irq, otherwise irqs seem to get lost on at least the
         * blt/bsd rings on ivb. */
        gen6_gt_force_wake_get(dev_priv);

        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                ironlake_enable_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock(&ring->irq_lock);

        return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
                I915_WRITE_IMR(ring, ~0);
                ironlake_disable_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock(&ring->irq_lock);

        gen6_gt_force_wake_put(dev_priv);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev->irq_enabled)
                return false;

        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                if (IS_G4X(dev))
                        i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
                else
                        ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
        }
        spin_unlock(&ring->irq_lock);

        return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
                if (IS_G4X(dev))
                        i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
                else
                        ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
        }
        spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START | (2 << 6) |
                        MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len)
{
        struct drm_device *dev = ring->dev;
        int ret;

        if (IS_I830(dev) || IS_845G(dev)) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
                        return ret;

                intel_ring_emit(ring, MI_BATCH_BUFFER);
                intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
                intel_ring_emit(ring, offset + len - 8);
                intel_ring_emit(ring, 0);
        } else {
                ret = intel_ring_begin(ring, 2);
                if (ret)
                        return ret;

                if (INTEL_INFO(dev)->gen >= 4) {
                        intel_ring_emit(ring,
                                        MI_BATCH_BUFFER_START | (2 << 6) |
                                        MI_BATCH_NON_SECURE_I965);
                        intel_ring_emit(ring, offset);
                } else {
                        intel_ring_emit(ring,
                                        MI_BATCH_BUFFER_START | (2 << 6));
                        intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
                }
        }
        intel_ring_advance(ring);

        return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_object *obj;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;

        kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;

        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }

        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret != 0)
                goto err_unref;

        ring->status_page.gfx_addr = obj->gtt_offset;
        ring->status_page.page_addr = kmap(obj->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                ret = -ENOMEM;
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        intel_ring_setup_status_page(ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                         ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
err:
        return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;
        int ret;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);

        init_waitqueue_head(&ring->irq_queue);
        spin_lock_init(&ring->irq_lock);

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
                if (ret)
                        return ret;
        }

        obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto err_hws;
        }

        ring->obj = obj;

        ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
        if (ret)
                goto err_unref;

        ring->map.size = ring->size;
        ring->map.offset = dev->agp->base + obj->gtt_offset;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }

        ring->virtual_start = ring->map.handle;
        ret = ring->init(ring);
        if (ret)
                goto err_unmap;

        /* Workaround an erratum on the i830 which causes a hang if
         * the TAIL pointer points to within the last 2 cachelines
         * of the buffer.
         */
        ring->effective_size = ring->size;
        if (IS_I830(ring->dev))
                ring->effective_size -= 128;

        return 0;

err_unmap:
        drm_core_ioremapfree(&ring->map, dev);
err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
        ring->obj = NULL;
err_hws:
        cleanup_status_page(ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv;
        int ret;

        if (ring->obj == NULL)
                return;

        /* Disable the ring buffer. The ring must be idle at this point */
        dev_priv = ring->dev->dev_private;
        ret = intel_wait_ring_idle(ring);
        if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);

        I915_WRITE_CTL(ring, 0);

        drm_core_ioremapfree(&ring->map, ring->dev);

        i915_gem_object_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
        ring->obj = NULL;

        if (ring->cleanup)
                ring->cleanup(ring);

        cleanup_status_page(ring);
}

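/*
 * When a request would straddle the end of the ring, fill the
 * remainder with MI_NOOPs (two dwords per loop iteration below) so
 * that the hardware skips straight back to the top of the buffer.
 */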
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
        unsigned int *virt;
        int rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = intel_wait_ring_buffer(ring, rem);
                if (ret)
                        return ret;
        }

        virt = (unsigned int *)(ring->virtual_start + ring->tail);
        rem /= 8;
        while (rem--) {
                *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
        }

        ring->tail = 0;
        ring->space = ring_space(ring);

        return 0;
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        bool was_interruptible;
        int ret;

        /* XXX As we have not yet audited all the paths to check that
         * they are ready for ERESTARTSYS from intel_ring_begin, do not
         * allow us to be interruptible by a signal.
         */
        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;

        ret = i915_wait_request(ring, seqno, true);

        dev_priv->mm.interruptible = was_interruptible;

        return ret;
}

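/*
 * Try to make space by retiring old requests: find the first request
 * whose completion would free at least n bytes, wait on its seqno,
 * and then recompute head and space from last_retired_head.
 */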
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
        struct drm_i915_gem_request *request;
        u32 seqno = 0;
        int ret;

        i915_gem_retire_requests_ring(ring);

        if (ring->last_retired_head != -1) {
                ring->head = ring->last_retired_head;
                ring->last_retired_head = -1;
                ring->space = ring_space(ring);
                if (ring->space >= n)
                        return 0;
        }

        list_for_each_entry(request, &ring->request_list, list) {
                int space;

                if (request->tail == -1)
                        continue;

                space = request->tail - (ring->tail + 8);
                if (space < 0)
                        space += ring->size;
                if (space >= n) {
                        seqno = request->seqno;
                        break;
                }

                /* Consume this request in case we need more space than
                 * is available and so need to prevent a race between
                 * updating last_retired_head and direct reads of
                 * I915_RING_HEAD. It also provides a nice sanity check.
                 */
                request->tail = -1;
        }

        if (seqno == 0)
                return -ENOSPC;

        ret = intel_ring_wait_seqno(ring, seqno);
        if (ret)
                return ret;

        if (WARN_ON(ring->last_retired_head == -1))
                return -ENOSPC;

        ring->head = ring->last_retired_head;
        ring->last_retired_head = -1;
        ring->space = ring_space(ring);
        if (WARN_ON(ring->space < n))
                return -ENOSPC;

        return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long end;
        int ret;

        ret = intel_ring_wait_request(ring, n);
        if (ret != -ENOSPC)
                return ret;

        trace_i915_ring_wait_begin(ring);
        if (drm_core_check_feature(dev, DRIVER_GEM))
                /* With GEM the hangcheck timer should kick us out of the loop,
                 * leaving it early runs the risk of corrupting GEM state (due
                 * to running on almost untested codepaths). But on resume
                 * timers don't work yet, so prevent a complete hang in that
                 * case by choosing an insanely large timeout. */
                end = jiffies + 60 * HZ;
        else
                end = jiffies + 3 * HZ;

        do {
                ring->head = I915_READ_HEAD(ring);
                ring->space = ring_space(ring);
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(ring);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                msleep(1);
                if (atomic_read(&dev_priv->mm.wedged))
                        return -EAGAIN;
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(ring);
        return -EBUSY;
}

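/*
 * Canonical emission pattern (a sketch of how the rest of this file
 * uses the three helpers; callers must always check the return value
 * before emitting):
 *
 *     ret = intel_ring_begin(ring, 2);
 *     if (ret)
 *             return ret;
 *     intel_ring_emit(ring, MI_FLUSH);
 *     intel_ring_emit(ring, MI_NOOP);
 *     intel_ring_advance(ring);
 *
 * intel_ring_begin() reserves space (counted in dwords) and handles
 * wrapping and waiting; intel_ring_advance() publishes the new tail
 * to the hardware.
 */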
int intel_ring_begin(struct intel_ring_buffer *ring,
                     int num_dwords)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        int n = 4*num_dwords;
        int ret;

        if (unlikely(atomic_read(&dev_priv->mm.wedged)))
                return -EIO;

        if (unlikely(ring->tail + n > ring->effective_size)) {
                ret = intel_wrap_ring_buffer(ring);
                if (unlikely(ret))
                        return ret;
        }

        if (unlikely(ring->space < n)) {
                ret = intel_wait_ring_buffer(ring, n);
                if (unlikely(ret))
                        return ret;
        }

        ring->space -= n;
        return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
        ring->tail &= ring->size - 1;
        ring->write_tail(ring, ring->tail);
}

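/*
 * The const templates below are copied wholesale into dev_priv->ring[]
 * by the intel_init_*_ring_buffer() functions at the end of this file,
 * which then patch individual vfuncs and masks per generation.
 */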
static const struct intel_ring_buffer render_ring = {
        .name = "render ring",
        .id = RCS,
        .mmio_base = RENDER_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_render_ring,
        .write_tail = ring_write_tail,
        .flush = render_ring_flush,
        .add_request = render_ring_add_request,
        .get_seqno = ring_get_seqno,
        .irq_get = render_ring_get_irq,
        .irq_put = render_ring_put_irq,
        .dispatch_execbuffer = render_ring_dispatch_execbuffer,
        .cleanup = render_ring_cleanup,
        .sync_to = render_ring_sync_to,
        .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
                               MI_SEMAPHORE_SYNC_RV,
                               MI_SEMAPHORE_SYNC_RB},
        .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
        .name = "bsd ring",
        .id = VCS,
        .mmio_base = BSD_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_ring_common,
        .write_tail = ring_write_tail,
        .flush = bsd_ring_flush,
        .add_request = ring_add_request,
        .get_seqno = ring_get_seqno,
        .irq_get = bsd_ring_get_irq,
        .irq_put = bsd_ring_put_irq,
        .dispatch_execbuffer = ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;

        /* Every tail move must follow the sequence below */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
        I915_WRITE(GEN6_BSD_RNCID, 0x0);

        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
                     50))
                DRM_ERROR("timed out waiting for IDLE Indicator\n");

        I915_WRITE_TAIL(ring, value);
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

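/*
 * gen6_ring_flush() and blt_ring_flush() below emit the same
 * four-dword MI_FLUSH_DW sequence and differ only in which invalidate
 * bits they set; the zero dwords are presumably the packet's (unused
 * here) post-sync address and data fields.
 */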
static int gen6_ring_flush(struct intel_ring_buffer *ring,
                           u32 invalidate, u32 flush)
{
        uint32_t cmd;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        cmd = MI_FLUSH_DW;
        if (invalidate & I915_GEM_GPU_DOMAINS)
                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                              u32 offset, u32 len)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
        .name = "gen6 bsd ring",
        .id = VCS,
        .mmio_base = GEN6_BSD_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_ring_common,
        .write_tail = gen6_bsd_ring_write_tail,
        .flush = gen6_ring_flush,
        .add_request = gen6_add_request,
        .get_seqno = gen6_ring_get_seqno,
        .irq_enable_mask = GEN6_BSD_USER_INTERRUPT,
        .irq_get = gen6_ring_get_irq,
        .irq_put = gen6_ring_put_irq,
        .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
        .sync_to = gen6_bsd_ring_sync_to,
        .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
                               MI_SEMAPHORE_SYNC_INVALID,
                               MI_SEMAPHORE_SYNC_VB},
        .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};

/* Blitter support (SandyBridge+) */

static int blt_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate, u32 flush)
{
        uint32_t cmd;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        cmd = MI_FLUSH_DW;
        if (invalidate & I915_GEM_DOMAIN_RENDER)
                cmd |= MI_INVALIDATE_TLB;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static const struct intel_ring_buffer gen6_blt_ring = {
        .name = "blt ring",
        .id = BCS,
        .mmio_base = BLT_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_ring_common,
        .write_tail = ring_write_tail,
        .flush = blt_ring_flush,
        .add_request = gen6_add_request,
        .get_seqno = gen6_ring_get_seqno,
        .irq_get = gen6_ring_get_irq,
        .irq_put = gen6_ring_put_irq,
        .irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT,
        .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
        .sync_to = gen6_blt_ring_sync_to,
        .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
                               MI_SEMAPHORE_SYNC_BV,
                               MI_SEMAPHORE_SYNC_INVALID},
        .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

        *ring = render_ring;
        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->flush = gen6_render_ring_flush;
                ring->irq_get = gen6_ring_get_irq;
                ring->irq_put = gen6_ring_put_irq;
                ring->irq_enable_mask = GT_USER_INTERRUPT;
                ring->get_seqno = gen6_ring_get_seqno;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->get_seqno = pc_render_get_seqno;
        }

        if (!I915_NEED_GFX_HWS(dev)) {
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
                memset(ring->status_page.page_addr, 0, PAGE_SIZE);
        }

        return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

        *ring = render_ring;
        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->irq_get = gen6_ring_get_irq;
                ring->irq_put = gen6_ring_put_irq;
                ring->irq_enable_mask = GT_USER_INTERRUPT;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->get_seqno = pc_render_get_seqno;
        }

        if (!I915_NEED_GFX_HWS(dev))
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);

        ring->size = size;
        ring->effective_size = ring->size;
        if (IS_I830(ring->dev))
                ring->effective_size -= 128;

        ring->map.offset = start;
        ring->map.size = size;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        ring->virtual_start = (void __force __iomem *)ring->map.handle;
        return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

        if (IS_GEN6(dev) || IS_GEN7(dev))
                *ring = gen6_bsd_ring;
        else
                *ring = bsd_ring;

        return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

        *ring = gen6_blt_ring;

        return intel_init_ring_buffer(dev, ring);
}