/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>

struct vc4_dev {
	struct drm_device *dev;

	bool firmware_kms;
	struct rpi_firmware *firmware;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_dsi *dsi1;
	struct vc4_vec *vec;

	struct drm_fbdev_cma *fbdev;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	struct vc4_bo_stats {
		u32 num_allocated;
		u32 size_allocated;
		u32 num_cached;
		u32 size_cached;
	} bo_stats;

	/* Protects bo_cache and the BO stats. */
	struct mutex bo_lock;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used (see the illustrative slot-claiming sketch after this
	 * struct).
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;
};
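
/* Illustrative sketch (not the driver's actual implementation) of how
 * a free bin_bo chunk could be claimed through the bin_alloc_used
 * bitmask, where each bit stands for one bin_alloc_size chunk. The
 * real logic lives in vc4_v3d_get_bin_slot() (declared below) and runs
 * under vc4->job_lock; the function name here is hypothetical.
 */
static inline int vc4_example_claim_bin_slot(struct vc4_dev *vc4)
{
	unsigned int slot;

	for (slot = 0; slot < 32; slot++) {
		if (!(vc4->bin_alloc_used & (1u << slot))) {
			/* Mark the chunk busy until the job that uses
			 * it completes.
			 */
			vc4->bin_alloc_used |= 1u << slot;
			return slot;
		}
	}

	return -ENOMEM;
}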

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}
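
/* Illustrative sketch (the driver's real test lives in vc4_fence.c): a
 * vc4 fence is signaled once the hardware's finished_seqno has caught
 * up with the fence's own seqno. The function name is hypothetical.
 */
static inline bool vc4_example_fence_signaled(struct dma_fence *fence)
{
	struct vc4_fence *f = to_vc4_fence(fence);
	struct vc4_dev *vc4 = to_vc4_dev(f->dev);

	return vc4->finished_seqno >= f->seqno;
}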

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};
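
/* Usage sketch (illustrative): a vc4_seqno_cb is normally embedded in a
 * larger state structure and registered with vc4_queue_seqno_cb()
 * (declared below), which runs the callback from a workqueue once the
 * given seqno has passed. The my_flip_state/my_flip_done names are
 * hypothetical:
 *
 *	static void my_flip_done(struct vc4_seqno_cb *cb)
 *	{
 *		struct my_flip_state *state =
 *			container_of(cb, struct my_flip_state, cb);
 *		...
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &state->cb, bo->seqno, my_flip_done);
 */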

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
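
/* These macros expect a local `vc4` pointer to be in scope at the call
 * site. Usage sketch (illustrative; the register names come from
 * vc4_regs.h):
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	uint32_t ident0 = V3D_READ(V3D_IDENT0);
 *	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
 */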

struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs. The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and the size decremented as the shader
	 * recs themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
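
/* Usage sketch (illustrative): per the job_lock comment in struct
 * vc4_dev, these helpers expect vc4->job_lock to be held, e.g. in the
 * IRQ handler when binning finishes and the head job is handed to the
 * render thread:
 *
 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
 *	exec = vc4_first_bin_job(vc4);
 *	if (exec)
 *		vc4_move_job_to_render(dev, exec);
 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */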

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar
 * and we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep()) {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
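
/* Usage sketch (illustrative): poll a register condition, sleeping 1 ms
 * between checks and giving up after 1000 ms. The condition shown is
 * hypothetical:
 *
 *	if (wait_for(V3D_READ(V3D_IDENT0) & expected_bits, 1000))
 *		DRM_ERROR("timed out waiting for V3D\n");
 */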

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
void vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
bool vc4_event_pending(struct drm_crtc *crtc);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_firmware_kms.c */
extern struct platform_driver vc4_firmware_kms_driver;
void vc4_fkms_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);