/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */

#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_util.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
        /* Any kernel allocation (gem_create_object hook) before it
         * gets another type set.
         */
        VC4_BO_TYPE_KERNEL,
        VC4_BO_TYPE_V3D,
        VC4_BO_TYPE_V3D_SHADER,
        VC4_BO_TYPE_DUMB,
        VC4_BO_TYPE_BIN,
        VC4_BO_TYPE_RCL,
        VC4_BO_TYPE_BCL,
        VC4_BO_TYPE_KERNEL_CACHE,
        VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
        /* Tracks the number of users of the perfmon, when this counter reaches
         * zero the perfmon is destroyed.
         */
        refcount_t refcnt;

        /* Number of counters activated in this perfmon instance
         * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
         */
        u8 ncounters;

        /* Events counted by the HW perf counters. */
        u8 events[DRM_VC4_MAX_PERF_COUNTERS];

        /* Storage for counter values. Counters are incremented by the HW
         * perf counter values every time the perfmon is attached to a GPU job.
         * This way, perfmon users don't have to retrieve the results after
         * each job if they want to track events covering several submissions.
         * Note that counter values can't be reset, but you can fake a reset by
         * destroying the perfmon and creating a new one.
         */
        u64 counters[];
};
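
/* Illustrative sketch, not part of this file: because counters[] above is a
 * flexible array member, a perfmon sized for "ncounters" events would
 * typically be allocated in one shot and reference-counted along these lines
 * (the real code lives in vc4_perfmon.c):
 *
 *	perfmon = kzalloc(struct_size(perfmon, counters, ncounters),
 *			  GFP_KERNEL);
 *	if (!perfmon)
 *		return -ENOMEM;
 *	perfmon->ncounters = ncounters;
 *	refcount_set(&perfmon->refcnt, 1);
 */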

struct vc4_dev {
        struct drm_device *dev;

        struct vc4_hdmi *hdmi;
        struct vc4_hvs *hvs;
        struct vc4_v3d *v3d;
        struct vc4_dpi *dpi;
        struct vc4_dsi *dsi1;
        struct vc4_vec *vec;
        struct vc4_txp *txp;

        struct vc4_hang_state *hang_state;

        /* The kernel-space BO cache. Tracks buffers that have been
         * unreferenced by all other users (refcounts of 0!) but not
         * yet freed, so we can do cheap allocations.
         */
        struct vc4_bo_cache {
                /* Array of list heads for entries in the BO cache,
                 * based on number of pages, so we can do O(1) lookups
                 * in the cache when allocating.
                 */
                struct list_head *size_list;
                uint32_t size_list_size;

                /* List of all BOs in the cache, ordered by age, so we
                 * can do O(1) lookups when trying to free old
                 * buffers.
                 */
                struct list_head time_list;
                struct work_struct time_work;
                struct timer_list time_timer;
        } bo_cache;
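
        /* Illustrative sketch, not part of this file: the O(1) allocation
         * lookup described above indexes size_list by page count and pops
         * the first entry, roughly (bounds checking against size_list_size
         * omitted; the real code is in vc4_bo.c):
         *
         *	struct list_head *bucket = &cache->size_list[num_pages];
         *
         *	bo = list_first_entry_or_null(bucket, struct vc4_bo,
         *				      size_head);
         */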

        u32 num_labels;
        struct vc4_label {
                const char *name;
                u32 num_allocated;
                u32 size_allocated;
        } *bo_labels;

        /* Protects bo_cache and bo_labels. */
        struct mutex bo_lock;

        /* Purgeable BO pool. All BOs in this pool can have their memory
         * reclaimed if the driver is unable to allocate new BOs. We also
         * keep stats related to the purge mechanism here.
         */
        struct {
                struct list_head list;
                unsigned int num;
                size_t size;
                unsigned int purged_num;
                size_t purged_size;
                struct mutex lock;
        } purgeable;
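
        /* Illustrative sketch, not part of this file: purging walks the pool
         * under purgeable.lock and accounts what was reclaimed, roughly (the
         * real code in vc4_bo.c also drops the lock around the purge itself
         * and re-checks the BO's madv state):
         *
         *	mutex_lock(&vc4->purgeable.lock);
         *	while ((bo = list_first_entry_or_null(&vc4->purgeable.list,
         *					      struct vc4_bo,
         *					      size_head))) {
         *		list_del(&bo->size_head);
         *		vc4->purgeable.num--;
         *		vc4->purgeable.size -= bo->base.base.size;
         *		... release the BO's backing storage ...
         *		vc4->purgeable.purged_num++;
         *		vc4->purgeable.purged_size += bo->base.base.size;
         *	}
         *	mutex_unlock(&vc4->purgeable.lock);
         */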

        uint64_t dma_fence_context;

        /* Sequence number for the last job queued in bin_job_list.
         * Starts at 0 (no jobs emitted).
         */
        uint64_t emit_seqno;

        /* Sequence number for the last completed job on the GPU.
         * Starts at 0 (no jobs completed).
         */
        uint64_t finished_seqno;
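
        /* Illustrative sketch, not part of this file: a job with sequence
         * number "seqno" is done once finished_seqno has caught up with it,
         * which is essentially what vc4_wait_for_seqno() (declared below)
         * waits for (the real code also handles timeouts and runtime PM):
         *
         *	ret = wait_event_interruptible(vc4->job_wait_queue,
         *				       vc4->finished_seqno >= seqno);
         */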

        /* List of all struct vc4_exec_info for jobs to be executed in
         * the binner. The first job in the list is the one currently
         * programmed into ct0ca for execution.
         */
        struct list_head bin_job_list;

        /* List of all struct vc4_exec_info for jobs that have
         * completed binning and are ready for rendering. The first
         * job in the list is the one currently programmed into ct1ca
         * for execution.
         */
        struct list_head render_job_list;

        /* List of the finished vc4_exec_infos waiting to be freed by
         * job_done_work.
         */
        struct list_head job_done_list;
        /* Spinlock used to synchronize the job_list and seqno
         * accesses between the IRQ handler and GEM ioctls.
         */
        spinlock_t job_lock;
        wait_queue_head_t job_wait_queue;
        struct work_struct job_done_work;

        /* Used to track the active perfmon if any. Access to this field is
         * protected by job_lock.
         */
        struct vc4_perfmon *active_perfmon;

        /* List of struct vc4_seqno_cb for callbacks to be made from a
         * workqueue when the given seqno is passed.
         */
        struct list_head seqno_cb_list;

        /* The memory used for storing binner tile alloc, tile state,
         * and overflow memory allocations. This is freed when V3D
         * powers down.
         */
        struct vc4_bo *bin_bo;

        /* Size of blocks allocated within bin_bo. */
        uint32_t bin_alloc_size;

        /* Bitmask of the bin_alloc_size chunks in bin_bo that are
         * used.
         */
        uint32_t bin_alloc_used;

        /* Bitmask of the current bin_alloc used for overflow memory. */
        uint32_t bin_alloc_overflow;

        /* Incremented when an underrun error happens after an atomic commit.
         * This is particularly useful to detect when a specific modeset is
         * too demanding in terms of memory or HVS bandwidth, which is hard
         * to guess at atomic check time.
         */
        atomic_t underrun;

        struct work_struct overflow_mem_work;

        int power_refcount;

        /* Set to true when the load tracker is active. */
        bool load_tracker_enabled;

        /* Mutex controlling the power refcount. */
        struct mutex power_lock;

        struct {
                struct timer_list timer;
                struct work_struct reset_work;
        } hangcheck;

        struct semaphore async_modeset;

        struct drm_modeset_lock ctm_state_lock;
        struct drm_private_obj ctm_manager;
        struct drm_private_obj load_tracker;

        /* List of vc4_debugfs_info_entry for adding to debugfs once
         * the minor is available (after drm_dev_register()).
         */
        struct list_head debugfs_list;

        /* Mutex for binner bo allocation. */
        struct mutex bin_bo_lock;
        /* Reference count for our binner bo. */
        struct kref bin_bo_kref;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
        return (struct vc4_dev *)dev->dev_private;
}

struct vc4_bo {
        struct drm_gem_cma_object base;

        /* seqno of the last job to render using this BO. */
        uint64_t seqno;

        /* seqno of the last job to use the RCL to write to this BO.
         *
         * Note that this doesn't include binner overflow memory
         * writes.
         */
        uint64_t write_seqno;

        bool t_format;

        /* List entry for the BO's position in either
         * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
         */
        struct list_head unref_head;

        /* Time in jiffies when the BO was put in vc4->bo_cache. */
        unsigned long free_time;

        /* List entry for the BO's position in vc4_dev->bo_cache.size_list */
        struct list_head size_head;

        /* Struct for shader validation state, if created by
         * DRM_IOCTL_VC4_CREATE_SHADER_BO.
         */
        struct vc4_validated_shader_info *validated_shader;

        /* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
         * for user-allocated labels.
         */
        int label;

        /* Count the number of active users. This is needed to determine
         * whether we can move the BO to the purgeable list or not (when the BO
         * is used by the GPU or the display engine we can't purge it).
         */
        refcount_t usecnt;

        /* Store purgeable/purged state here */
        u32 madv;
        struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
        return (struct vc4_bo *)bo;
}
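
/* Note on the cast-based upcast helpers (here and below): they rely on the
 * wrapped base object being the first member of the wrapping struct. As a
 * sketch, the layout-independent equivalent for to_vc4_bo() would need two
 * steps, because vc4_bo embeds a drm_gem_cma_object, which in turn embeds
 * the drm_gem_object:
 *
 *	return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
 */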

struct vc4_fence {
        struct dma_fence base;
        struct drm_device *dev;
        /* vc4 seqno for signaled() test */
        uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
        return (struct vc4_fence *)fence;
}

struct vc4_seqno_cb {
        struct work_struct work;
        uint64_t seqno;
        void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
        struct vc4_dev *vc4;
        struct platform_device *pdev;
        void __iomem *regs;
        struct clk *clk;
        struct debugfs_regset32 regset;
};

struct vc4_hvs {
        struct platform_device *pdev;
        void __iomem *regs;
        u32 __iomem *dlist;

        /* Memory manager for CRTCs to allocate space in the display
         * list. Units are dwords.
         */
        struct drm_mm dlist_mm;
        /* Memory manager for the LBM memory used by HVS scaling. */
        struct drm_mm lbm_mm;
        spinlock_t mm_lock;

        struct drm_mm_node mitchell_netravali_filter;
        struct debugfs_regset32 regset;
};
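
/* Illustrative sketch, not part of this file: carving a CRTC's display list
 * out of dlist_mm goes through the DRM range manager under mm_lock, roughly
 * (sizes are in dwords, per the comment above; the real code is in
 * vc4_crtc.c):
 *
 *	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
 *	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &node, dlist_count);
 *	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
 */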

struct vc4_plane {
        struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
        return (struct vc4_plane *)plane;
}

enum vc4_scaling_mode {
        VC4_SCALING_NONE,
        VC4_SCALING_TPZ,
        VC4_SCALING_PPF,
};
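
/* Illustrative sketch, not part of this file: vc4_plane.c picks one of the
 * modes above per axis, roughly as follows - no scaling when the source and
 * destination sizes match, the trapezoidal (TPZ) filter when shrinking, the
 * polyphase (PPF) filter when enlarging:
 *
 *	static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
 *	{
 *		if (dst == src)
 *			return VC4_SCALING_NONE;
 *		return dst > src ? VC4_SCALING_PPF : VC4_SCALING_TPZ;
 *	}
 */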

struct vc4_plane_state {
        struct drm_plane_state base;
        /* System memory copy of the display list for this element, computed
         * at atomic_check time.
         */
        u32 *dlist;
        u32 dlist_size; /* Number of dwords allocated for the display list */
        u32 dlist_count; /* Number of used dwords in the display list. */

        /* Offset in the dlist to various words, for pageflip or
         * cursor updates.
         */
        u32 pos0_offset;
        u32 pos2_offset;
        u32 ptr0_offset;
        u32 lbm_offset;

        /* Offset where the plane's dlist was last stored in the
         * hardware at vc4_crtc_atomic_flush() time.
         */
        u32 __iomem *hw_dlist;

        /* Clipped coordinates of the plane on the display. */
        int crtc_x, crtc_y, crtc_w, crtc_h;
        /* Clipped area being scanned from in the FB. */
        u32 src_x, src_y;

        u32 src_w[2], src_h[2];

        /* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
        enum vc4_scaling_mode x_scaling[2], y_scaling[2];
        bool is_unity;
        bool is_yuv;

        /* Offset to start scanning out from the start of the plane's
         * BO.
         */
        u32 offsets[3];

        /* Our allocation in LBM for temporary storage during scaling. */
        struct drm_mm_node lbm;

        /* Set when the plane has per-pixel alpha content or does not cover
         * the entire screen. This is a hint to the CRTC that it might need
         * to enable background color fill.
         */
        bool needs_bg_fill;

        /* Mark the dlist as initialized. Useful to avoid initializing it twice
         * when async update is not possible.
         */
        bool dlist_initialized;

        /* Load of this plane on the HVS block. The load is expressed in HVS
         * cycles/sec.
         */
        u64 hvs_load;

        /* Memory bandwidth needed for this plane. This is expressed in
         * bytes/sec.
         */
        u64 membus_load;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
        return (struct vc4_plane_state *)state;
}

enum vc4_encoder_type {
        VC4_ENCODER_TYPE_NONE,
        VC4_ENCODER_TYPE_HDMI,
        VC4_ENCODER_TYPE_VEC,
        VC4_ENCODER_TYPE_DSI0,
        VC4_ENCODER_TYPE_DSI1,
        VC4_ENCODER_TYPE_SMI,
        VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
        struct drm_encoder base;
        enum vc4_encoder_type type;
        u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
        return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
        /* Which channel of the HVS this pixelvalve sources from. */
        int hvs_channel;

        enum vc4_encoder_type encoder_types[4];
        const char *debugfs_name;
};

struct vc4_crtc {
        struct drm_crtc base;
        struct platform_device *pdev;
        const struct vc4_crtc_data *data;
        void __iomem *regs;

        /* Timestamp at start of vblank irq - unaffected by lock delays. */
        ktime_t t_vblank;

        /* Which HVS channel we're using for our CRTC. */
        int channel;

        u8 lut_r[256];
        u8 lut_g[256];
        u8 lut_b[256];
        /* Size in pixels of the COB memory allocated to this CRTC. */
        u32 cob_size;

        struct drm_pending_vblank_event *event;

        struct debugfs_regset32 regset;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
        return (struct vc4_crtc *)crtc;
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }
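
/* Illustrative sketch, not part of this file: VC4_REG32() builds the
 * debugfs_reg32 tables behind the regset fields above, e.g. (the register
 * names here are just examples):
 *
 *	static const struct debugfs_reg32 crtc_regs[] = {
 *		VC4_REG32(PV_CONTROL),
 *		VC4_REG32(PV_V_CONTROL),
 *	};
 */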

struct vc4_exec_info {
        /* Sequence number for this bin/render job. */
        uint64_t seqno;

        /* Latest write_seqno of any BO that binning depends on. */
        uint64_t bin_dep_seqno;

        struct dma_fence *fence;

        /* Last current addresses the hardware was processing when the
         * hangcheck timer checked on us.
         */
        uint32_t last_ct0ca, last_ct1ca;

        /* Kernel-space copy of the ioctl arguments */
        struct drm_vc4_submit_cl *args;

        /* This is the array of BOs that were looked up at the start of exec.
         * Command validation will use indices into this array.
         */
        struct drm_gem_cma_object **bo;
        uint32_t bo_count;

        /* List of BOs that are being written by the RCL. Other than
         * the binner temporary storage, this is all the BOs written
         * by the job.
         */
        struct drm_gem_cma_object *rcl_write_bo[4];
        uint32_t rcl_write_bo_count;

        /* Pointers for our position in vc4->job_list */
        struct list_head head;

        /* List of other BOs used in the job that need to be released
         * once the job is complete.
         */
        struct list_head unref_list;

        /* Current unvalidated indices into @bo loaded by the non-hardware
         * VC4_PACKET_GEM_HANDLES.
         */
        uint32_t bo_index[2];

        /* This is the BO where we store the validated command lists, shader
         * records, and uniforms.
         */
        struct drm_gem_cma_object *exec_bo;

        /**
         * This tracks the per-shader-record state (packet 64) that
         * determines the length of the shader record and the offset
         * it's expected to be found at. It gets read in from the
         * command lists.
         */
        struct vc4_shader_state {
                uint32_t addr;
                /* Maximum vertex index referenced by any primitive using this
                 * shader state.
                 */
                uint32_t max_index;
        } *shader_state;

        /** How many shader states the user declared they were using. */
        uint32_t shader_state_size;
        /** How many shader state records the validator has seen. */
        uint32_t shader_state_count;

        bool found_tile_binning_mode_config_packet;
        bool found_start_tile_binning_packet;
        bool found_increment_semaphore_packet;
        bool found_flush;
        uint8_t bin_tiles_x, bin_tiles_y;
        /* Physical address of the start of the tile alloc array
         * (where each tile's binned CL will start)
         */
        uint32_t tile_alloc_offset;
        /* Bitmask of which binner slots are freed when this job completes. */
        uint32_t bin_slots;

        /**
         * Computed addresses pointing into exec_bo where we start the
         * bin thread (ct0) and render thread (ct1).
         */
        uint32_t ct0ca, ct0ea;
        uint32_t ct1ca, ct1ea;

        /* Pointer to the unvalidated bin CL (if present). */
        void *bin_u;
        /* Pointers to the shader recs. The paddr gets incremented as CL
         * packets are relocated in validate_gl_shader_state, and the vaddrs
         * (u and v) get incremented and size decremented as the shader recs
         * themselves are validated.
         */
        void *shader_rec_u;
        void *shader_rec_v;
        uint32_t shader_rec_p;
        uint32_t shader_rec_size;

        /* Pointers to the uniform data. These pointers are incremented, and
         * size decremented, as each batch of uniforms is uploaded.
         */
        void *uniforms_u;
        void *uniforms_v;
        uint32_t uniforms_p;
        uint32_t uniforms_size;

        /* Pointer to a performance monitor object if the user requested it,
         * NULL otherwise.
         */
        struct vc4_perfmon *perfmon;

        /* Whether the exec has taken a reference to the binner BO, which should
         * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
         */
        bool bin_bo_used;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
        struct {
                struct idr idr;
                struct mutex lock;
        } perfmon;

        bool bin_bo_used;
};
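
/* Illustrative sketch, not part of this file: vc4_perfmon_find() (declared
 * below) resolves a userspace perfmon id through the per-file idr, roughly:
 *
 *	mutex_lock(&vc4file->perfmon.lock);
 *	perfmon = idr_find(&vc4file->perfmon.idr, id);
 *	if (perfmon)
 *		vc4_perfmon_get(perfmon);
 *	mutex_unlock(&vc4file->perfmon.lock);
 */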

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
        return list_first_entry_or_null(&vc4->bin_job_list,
                                        struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
        return list_first_entry_or_null(&vc4->render_job_list,
                                        struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
        if (list_empty(&vc4->render_job_list))
                return NULL;
        return list_last_entry(&vc4->render_job_list,
                               struct vc4_exec_info, head);
}
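
/* Illustrative sketch, not part of this file: the IRQ and submission paths
 * use the helpers above while holding job_lock, e.g. handing the head of
 * the bin list over to the render list once binning completes, roughly:
 *
 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
 *	exec = vc4_first_bin_job(vc4);
 *	if (exec)
 *		vc4_move_job_to_render(dev, exec);
 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */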

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
        bool is_direct;
        uint32_t p_offset[4];
};
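
/* Illustrative sketch, not part of this file: following the ~0 convention
 * above, a validator walking p_offset[] skips the unprovided parameters,
 * roughly (the real relocation code is in vc4_validate.c):
 *
 *	for (i = 0; i < 4; i++) {
 *		if (sample->p_offset[i] == ~0)
 *			continue;
 *		... validate and relocate the parameter at p_offset[i] ...
 *	}
 */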

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
        uint32_t uniforms_size;
        uint32_t uniforms_src_size;
        uint32_t num_texture_samples;
        struct vc4_texture_sample_info *texture_samples;

        uint32_t num_uniform_addr_offsets;
        uint32_t *uniform_addr_offsets;

        bool is_threaded;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
        int ret__ = 0; \
        while (!(COND)) { \
                if (time_after(jiffies, timeout__)) { \
                        if (!(COND)) \
                                ret__ = -ETIMEDOUT; \
                        break; \
                } \
                if (W && drm_can_sleep()) { \
                        msleep(W); \
                } else { \
                        cpu_relax(); \
                } \
        } \
        ret__; \
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
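
/* Illustrative sketch, not part of this file: wait_for() polls a condition
 * with a timeout in milliseconds, typically on a register read (the register
 * and bit names below are made up):
 *
 *	ret = wait_for(V3D_READ(V3D_SOME_STATUS) & V3D_SOME_READY_BIT, 1000);
 *	if (ret)
 *		DRM_ERROR("timed out waiting for engine\n");
 */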

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
                             bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
                                 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
                                                 struct dma_buf_attachment *attach,
                                                 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
                             bool in_vblank_irq, int *vpos, int *hpos,
                             ktime_t *stime, ktime_t *etime,
                             const struct drm_display_mode *mode);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_txp_armed(struct drm_crtc_state *state);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
                          unsigned int *right, unsigned int *left,
                          unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_file(struct drm_device *drm,
                          const char *filename,
                          int (*show)(struct seq_file*, void*),
                          void *data);
void vc4_debugfs_add_regset32(struct drm_device *drm,
                              const char *filename,
                              struct debugfs_regset32 *regset);
#else
static inline void vc4_debugfs_add_file(struct drm_device *drm,
                                        const char *filename,
                                        int (*show)(struct seq_file*, void*),
                                        void *data)
{
}

static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
                                            const char *filename,
                                            struct debugfs_regset32 *regset)
{
}
#endif

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
                       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
                       struct vc4_seqno_cb *cb, uint64_t seqno,
                       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
                                 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
                            struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
                    void *validated,
                    void *unvalidated,
                    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
                                      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
                        struct drm_gem_cma_object *fbo,
                        uint32_t offset, uint8_t tiling_format,
                        uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
                      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);