/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>

struct vc4_dev {
	struct drm_device *dev;

	bool firmware_kms;
	struct rpi_firmware *firmware;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_dsi *dsi1;
	struct vc4_vec *vec;

	struct drm_fbdev_cma *fbdev;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	struct vc4_bo_stats {
		u32 num_allocated;
		u32 size_allocated;
		u32 num_cached;
		u32 size_cached;
	} bo_stats;

	/* Protects bo_cache and the BO stats. */
	struct mutex bo_lock;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner.  The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering.  The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations.  This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;
};
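
/*
 * Illustrative sketch (not from the original header): how the
 * bo_cache.size_list buckets above give O(1) cache hits.  An allocation
 * rounds its size up to a page count, pops the head of that bucket if
 * one exists, and falls back to a fresh CMA allocation otherwise.  The
 * real logic lives in vc4_bo.c; the exact shape here is illustrative.
 *
 *	struct list_head *bucket = &vc4->bo_cache.size_list[page_index];
 *
 *	if (!list_empty(bucket)) {
 *		struct vc4_bo *bo = list_first_entry(bucket,
 *						     struct vc4_bo, size_head);
 *		list_del(&bo->unref_head);
 *		list_del(&bo->size_head);
 *		return bo;
 *	}
 */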

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* Normally (resv == &_resv), except for imported BOs. */
	struct reservation_object *resv;
	struct reservation_object _resv;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}
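
/*
 * Illustrative sketch: vc4's dma_fence ->signaled() hook reduces to
 * comparing the fence's seqno against the device-wide finished_seqno
 * (see vc4_fence.c for the real implementation), roughly:
 *
 *	static bool vc4_fence_signaled(struct dma_fence *fence)
 *	{
 *		struct vc4_fence *f = to_vc4_fence(fence);
 *		struct vc4_dev *vc4 = to_vc4_dev(f->dev);
 *
 *		return vc4->finished_seqno >= f->seqno;
 *	}
 */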

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};
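
/*
 * Illustrative usage (the callback name and caller are hypothetical):
 * a consumer arms a vc4_seqno_cb so that func runs from a workqueue
 * once the GPU has passed the given seqno.  vc4_queue_seqno_cb() is
 * declared below under "vc4_gem.c".
 *
 *	static void my_flip_done(struct vc4_seqno_cb *cb)
 *	{
 *		(complete the page flip here)
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &state->cb, bo->seqno, my_flip_done);
 */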

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list.  Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
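
/*
 * Example usage (illustrative; register names come from vc4_regs.h):
 * the macros above expand to dereferences of a local "vc4" pointer, so
 * a "struct vc4_dev *vc4" must be in scope at the call site.
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	u32 ident = V3D_READ(V3D_IDENT0);
 *
 *	HVS_WRITE(SCALER_DISPCTRL,
 *		  HVS_READ(SCALER_DISPCTRL) | SCALER_DISPCTRL_ENABLE);
 */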

struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL.  Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at.  It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs.  The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and the size decremented as the shader
	 * recs themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data.  These pointers are incremented, and
	 * the size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
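
/*
 * Illustrative flow (a sketch of the IRQ-side job handoff, not verbatim
 * kernel code): when binning completes, the interrupt handler pops the
 * head of bin_job_list, moves it to render_job_list, and kicks the next
 * bin job, roughly:
 *
 *	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
 *
 *	if (exec) {
 *		vc4_move_job_to_render(dev, exec);
 *		if (vc4_first_bin_job(vc4))
 *			vc4_submit_next_bin_job(dev);
 *	}
 */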

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts.  Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep()) {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
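
/*
 * Example usage (illustrative; SOME_STATUS_BIT is a hypothetical flag):
 * poll a register for up to 1000ms, sleeping 1ms between reads when the
 * context allows it.
 *
 *	int ret = wait_for(HVS_READ(SCALER_DISPSTAT) & SOME_STATUS_BIT,
 *			   1000);
 *	if (ret)
 *		DRM_ERROR("timed out waiting for status bit\n");
 */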

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
void vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
bool vc4_event_pending(struct drm_crtc *crtc);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_firmware_kms.c */
extern struct platform_driver vc4_firmware_kms_driver;
void vc4_fkms_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);