#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	void *page_addr;	/* CPU address of the page; read via intel_read_status_page() */
	struct drm_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
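
/*
 * Illustrative usage: each macro resolves a per-ring register from the
 * ring's mmio_base, so one code path can service every engine:
 *
 *	I915_WRITE_TAIL(ring, ring->tail);
 *	head = I915_READ_HEAD(ring);
 */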

struct drm_i915_gem_execbuffer2;

struct intel_ring_buffer {
	u32 mmio_base;		/* register base used by the I915_*_TAIL/HEAD/START/CTL macros */
	void *virtual_start;	/* CPU mapping of the ring; see intel_ring_emit() */
	unsigned int tail;	/* software write offset into the ring, in bytes */

	struct drm_device *dev;
	struct drm_gem_object *gem_object;

	struct intel_hw_status_page status_page;

	u32 irq_gem_seqno;	/* last seqno seen at irq time */
	u32 waiting_gem_seqno;
	int user_irq_refcount;
	void (*user_irq_get)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
	void (*user_irq_put)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);

	int (*init)(struct drm_device *dev,
		    struct intel_ring_buffer *ring);

	void (*write_tail)(struct drm_device *dev,
			   struct intel_ring_buffer *ring,
			   u32 value);
	void (*flush)(struct drm_device *dev,
		      struct intel_ring_buffer *ring,
		      u32 invalidate_domains,
		      u32 flush_domains);
	u32 (*add_request)(struct drm_device *dev,
			   struct intel_ring_buffer *ring,
			   u32 flush_domains);
	u32 (*get_seqno)(struct drm_device *dev,
			 struct intel_ring_buffer *ring);
	int (*dispatch_gem_execbuffer)(struct drm_device *dev,
				       struct intel_ring_buffer *ring,
				       struct drm_i915_gem_execbuffer2 *exec,
				       struct drm_clip_rect *cliprects,
				       uint64_t exec_offset);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list, last_rendering_seqno can
	 * be used to differentiate between the two elements.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	bool outstanding_lazy_request;

	wait_queue_head_t irq_queue;
};
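
/*
 * Illustrative sketch (not a declaration in this header): callers drive
 * a ring through its hooks rather than touching hardware directly, so
 * the render, BSD and blitter rings share one request path.  Assuming
 * the caller already holds the DRM struct_mutex, and ignoring seqno
 * wraparound for brevity:
 *
 *	ring->flush(dev, ring, invalidate_domains, flush_domains);
 *	seqno = ring->add_request(dev, ring, flush_domains);
 *	...
 *	if (ring->get_seqno(dev, ring) >= seqno)
 *		; /+ the request has completed +/
 */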

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* The status page is a CPU-visible array of u32s written by the GPU. */
	u32 *regs = ring->status_page.page_addr;

	return regs[reg];
}
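
/*
 * Illustrative usage: entries in the status page are addressed by dword
 * index.  I915_GEM_HWS_INDEX is assumed from the driver's other
 * headers; it is not defined here:
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */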

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct drm_device *dev,
			       struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring, int n);
void intel_ring_begin(struct drm_device *dev,
		      struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct drm_device *dev,
				   struct intel_ring_buffer *ring,
				   unsigned int data)
{
	/* Write one dword at the current tail and advance the software
	 * tail; intel_ring_advance() later flushes it to the hardware. */
	unsigned int *virt = ring->virtual_start + ring->tail;

	*virt = data;
	ring->tail += 4;
}

void intel_ring_advance(struct drm_device *dev,
			struct intel_ring_buffer *ring);
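
/*
 * Illustrative usage of the emit protocol: reserve space for a number
 * of dwords, write them, then advance the hardware tail.  MI_NOOP is
 * assumed from the command definitions elsewhere; it is not defined in
 * this header:
 *
 *	intel_ring_begin(dev, ring, 2);
 *	intel_ring_emit(dev, ring, MI_NOOP);
 *	intel_ring_emit(dev, ring, MI_NOOP);
 *	intel_ring_advance(dev, ring);
 */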

u32 intel_ring_get_seqno(struct drm_device *dev,
			 struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct drm_device *dev,
			       struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct drm_device *dev,
				  struct intel_ring_buffer *ring);

#endif /* _INTEL_RINGBUFFER_H_ */