drivers/gpu/drm/arm/malidp_crtc.c
/*
 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * ARM Mali DP500/DP550/DP650 driver (crtc operations)
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <video/videomode.h>

#include "malidp_drv.h"
#include "malidp_hw.h"

static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc,
						   const struct drm_display_mode *mode)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;

	/*
	 * Check that the hardware can drive the required clock rate, but skip
	 * the check if the clock is meant to be disabled (req_rate = 0).
	 * mode->crtc_clock is expressed in kHz, hence the conversion to Hz.
	 */
	long rate, req_rate = mode->crtc_clock * 1000;

	if (req_rate) {
		rate = clk_round_rate(hwdev->pxlclk, req_rate);
		if (rate != req_rate) {
			DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
					 req_rate);
			return MODE_NOCLOCK;
		}
	}

	return MODE_OK;
}

static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	struct videomode vm;
	int err = pm_runtime_get_sync(crtc->dev->dev);

	if (err < 0) {
		DRM_DEBUG_DRIVER("Failed to enable runtime power management: %d\n", err);
		return;
	}

	drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
	clk_prepare_enable(hwdev->pxlclk);

	/* We rely on firmware to set mclk to a sensible level. */
	clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);

	hwdev->hw->modeset(hwdev, &vm);
	hwdev->hw->leave_config_mode(hwdev);
	drm_crtc_vblank_on(crtc);
}

static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	int err;

	/* always disable planes on the CRTC that is being turned off */
	drm_atomic_helper_disable_planes_on_crtc(old_state, false);

	drm_crtc_vblank_off(crtc);
	hwdev->hw->enter_config_mode(hwdev);

	clk_disable_unprepare(hwdev->pxlclk);

	err = pm_runtime_put(crtc->dev->dev);
	if (err < 0) {
		DRM_DEBUG_DRIVER("Failed to disable runtime power management: %d\n", err);
	}
}

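/*
 * The gamma curve is programmed as MALIDP_COEFFTAB_NUM_COEFFS (64) linear
 * segments covering the 12-bit input range; the segment boundaries below
 * are denser near zero, where a gamma curve typically changes fastest.
 */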
static const struct gamma_curve_segment {
	u16 start;
	u16 end;
} segments[MALIDP_COEFFTAB_NUM_COEFFS] = {
	/* sector 0 */
	{ 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 },
	{ 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
	{ 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 },
	{ 12, 12 }, { 13, 13 }, { 14, 14 }, { 15, 15 },
	/* sector 1 */
	{ 16, 19 }, { 20, 23 }, { 24, 27 }, { 28, 31 },
	/* sector 2 */
	{ 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 },
	/* sector 3 */
	{ 64, 79 }, { 80, 95 }, { 96, 111 }, { 112, 127 },
	/* sector 4 */
	{ 128, 159 }, { 160, 191 }, { 192, 223 }, { 224, 255 },
	/* sector 5 */
	{ 256, 319 }, { 320, 383 }, { 384, 447 }, { 448, 511 },
	/* sector 6 */
	{ 512, 639 }, { 640, 767 }, { 768, 895 }, { 896, 1023 },
	{ 1024, 1151 }, { 1152, 1279 }, { 1280, 1407 }, { 1408, 1535 },
	{ 1536, 1663 }, { 1664, 1791 }, { 1792, 1919 }, { 1920, 2047 },
	{ 2048, 2175 }, { 2176, 2303 }, { 2304, 2431 }, { 2432, 2559 },
	{ 2560, 2687 }, { 2688, 2815 }, { 2816, 2943 }, { 2944, 3071 },
	{ 3072, 3199 }, { 3200, 3327 }, { 3328, 3455 }, { 3456, 3583 },
	{ 3584, 3711 }, { 3712, 3839 }, { 3840, 3967 }, { 3968, 4095 },
};

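/*
 * DE_COEFTAB_DATA packs each gamma segment's slope (a, scaled by 256 in
 * malidp_generate_gamma_table below) into bits [27:16] and the segment's
 * start output value (b) into bits [11:0], 12 bits each.
 */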
#define DE_COEFTAB_DATA(a, b) ((((a) & 0xfff) << 16) | (((b) & 0xfff)))

static void malidp_generate_gamma_table(struct drm_property_blob *lut_blob,
					u32 coeffs[MALIDP_COEFFTAB_NUM_COEFFS])
{
	struct drm_color_lut *lut = (struct drm_color_lut *)lut_blob->data;
	int i;

	for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) {
		u32 a, b, delta_in, out_start, out_end;

		delta_in = segments[i].end - segments[i].start;
		/* DP has 12-bit internal precision for its LUTs. */
		out_start = drm_color_lut_extract(lut[segments[i].start].green,
						  12);
		out_end = drm_color_lut_extract(lut[segments[i].end].green, 12);
		a = (delta_in == 0) ? 0 : ((out_end - out_start) * 256) / delta_in;
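		/*
		 * Example: for segment { 16, 19 } (delta_in = 3), an output
		 * ramp from out_start = 0x100 to out_end = 0x160 gives
		 * a = (0x60 * 256) / 3 = 0x2000, i.e. a slope of 32 output
		 * codes per input code, carried with 8 fractional bits.
		 */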
		b = out_start;
		coeffs[i] = DE_COEFTAB_DATA(a, b);
	}
}

/*
 * Check if there is a new gamma LUT and if it is of an acceptable size. Also,
 * reject any LUTs that use distinct red, green, and blue curves.
 */
static int malidp_crtc_atomic_check_gamma(struct drm_crtc *crtc,
					  struct drm_crtc_state *state)
{
	struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
	struct drm_color_lut *lut;
	size_t lut_size;
	int i;

	if (!state->color_mgmt_changed || !state->gamma_lut)
		return 0;

	if (crtc->state->gamma_lut &&
	    (crtc->state->gamma_lut->base.id == state->gamma_lut->base.id))
		return 0;

	if (state->gamma_lut->length % sizeof(struct drm_color_lut))
		return -EINVAL;

	lut_size = state->gamma_lut->length / sizeof(struct drm_color_lut);
	if (lut_size != MALIDP_GAMMA_LUT_SIZE)
		return -EINVAL;

	lut = (struct drm_color_lut *)state->gamma_lut->data;
	for (i = 0; i < lut_size; ++i)
		if (!((lut[i].red == lut[i].green) &&
		      (lut[i].red == lut[i].blue)))
			return -EINVAL;

	if (!state->mode_changed) {
		int ret;

		state->mode_changed = true;
		/*
		 * Kerneldoc for drm_atomic_helper_check_modeset mandates that
		 * it be invoked when the driver sets ->mode_changed. Since
		 * changing the gamma LUT doesn't depend on any external
		 * resources, it is safe to call it only once.
		 */
		ret = drm_atomic_helper_check_modeset(crtc->dev, state->state);
		if (ret)
			return ret;
	}

	malidp_generate_gamma_table(state->gamma_lut, mc->gamma_coeffs);
	return 0;
}

/*
 * Check if there is a new CTM and if it contains valid input. Valid here means
 * that the number fits inside the representable range of a Q3.12 value once
 * the extra fractional bits of the input data have been truncated.
 *
 * The COLORADJ registers can be changed atomically.
 */
static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc,
					struct drm_crtc_state *state)
{
	struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
	struct drm_color_ctm *ctm;
	int i;

	if (!state->color_mgmt_changed)
		return 0;

	if (!state->ctm)
		return 0;

	if (crtc->state->ctm && (crtc->state->ctm->base.id ==
				 state->ctm->base.id))
		return 0;

	/*
	 * The size of the ctm is checked in
	 * drm_atomic_replace_property_blob_from_id.
	 */
	ctm = (struct drm_color_ctm *)state->ctm->data;
	for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) {
		/* Convert from S31.32 to Q3.12. */
		s64 val = ctm->matrix[i];
		u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) &
			  GENMASK_ULL(14, 0);
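		/*
		 * Example: 1.0 in the sign-magnitude S31.32 CTM format is
		 * 1ULL << 32; dropping the 20 least significant fractional
		 * bits leaves 0x1000, the Q3.12 encoding of 1.0.
		 */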

		/*
		 * Convert to 2s complement and check the destination's top bit
		 * for overflow. NB: Can't check before converting or it'd
		 * incorrectly reject the case:
		 *   sign == 1
		 *   mag == 0x2000
		 */
		if (val & BIT_ULL(63))
			mag = ~mag + 1;
		if (!!(val & BIT_ULL(63)) != !!(mag & BIT(14)))
			return -EINVAL;
		mc->coloradj_coeffs[i] = mag;
	}

	return 0;
}

static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
					    struct drm_crtc_state *state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	struct malidp_crtc_state *cs = to_malidp_crtc_state(state);
	struct malidp_se_config *s = &cs->scaler_config;
	struct drm_plane *plane;
	struct videomode vm;
	const struct drm_plane_state *pstate;
	u32 h_upscale_factor = 0; /* U16.16 */
	u32 v_upscale_factor = 0; /* U16.16 */
	u8 scaling = cs->scaled_planes_mask;
	int ret;

	if (!scaling) {
		s->scale_enable = false;
		goto mclk_calc;
	}

	/* The scaling engine can only handle one plane at a time. */
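	/* (scaling & (scaling - 1)) is non-zero iff more than one bit is set. */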
	if (scaling & (scaling - 1))
		return -EINVAL;

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct malidp_plane *mp = to_malidp_plane(plane);
		u32 phase;

		if (!(mp->layer->id & scaling))
			continue;

		/*
		 * Convert crtc_[w|h] to U32.32, then divide by U16.16 src_[w|h]
		 * to get the U16.16 result.
		 */
		h_upscale_factor = div_u64((u64)pstate->crtc_w << 32,
					   pstate->src_w);
		v_upscale_factor = div_u64((u64)pstate->crtc_h << 32,
					   pstate->src_h);
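		/*
		 * E.g. a 960x540 source (src_w/h = 960 << 16, 540 << 16)
		 * filling a 1920x1080 CRTC rectangle yields upscale factors
		 * of 0x20000, i.e. 2.0 in U16.16.
		 */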

		s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 ||
				      (v_upscale_factor >> 16) >= 2);

		if (pstate->rotation & MALIDP_ROTATED_MASK) {
			s->input_w = pstate->src_h >> 16;
			s->input_h = pstate->src_w >> 16;
		} else {
			s->input_w = pstate->src_w >> 16;
			s->input_h = pstate->src_h >> 16;
		}

		s->output_w = pstate->crtc_w;
		s->output_h = pstate->crtc_h;

#define SE_N_PHASE 4
#define SE_SHIFT_N_PHASE 12
		/* Calculate initial_phase and delta_phase for horizontal. */
		phase = s->input_w;
		s->h_init_phase =
			((phase << SE_N_PHASE) / s->output_w + 1) / 2;

		phase = s->input_w;
		phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
		s->h_delta_phase = phase / s->output_w;
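		/*
		 * E.g. upscaling a 640 pixel wide source to 1920 output
		 * pixels gives h_delta_phase = (640 << 16) / 1920 = 0x5555,
		 * i.e. the sampling position advances by roughly a third of
		 * an input pixel per output pixel.
		 */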

		/* Same for vertical. */
		phase = s->input_h;
		s->v_init_phase =
			((phase << SE_N_PHASE) / s->output_h + 1) / 2;

		phase = s->input_h;
		phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
		s->v_delta_phase = phase / s->output_h;
#undef SE_N_PHASE
#undef SE_SHIFT_N_PHASE
		s->plane_src_id = mp->layer->id;
	}

	s->scale_enable = true;
	s->hcoeff = malidp_se_select_coeffs(h_upscale_factor);
	s->vcoeff = malidp_se_select_coeffs(v_upscale_factor);

mclk_calc:
	drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
	ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
	if (ret < 0)
		return -EINVAL;
	return 0;
}

static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
				    struct drm_crtc_state *state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	struct drm_plane *plane;
	const struct drm_plane_state *pstate;
	u32 rot_mem_free, rot_mem_usable;
	int rotated_planes = 0;
	int ret;

	/*
	 * Check if there is enough rotation memory available for planes
	 * that need 90° or 270° rotation or planes that are compressed.
	 * Each plane has set its required memory size in the ->plane_check()
	 * callback, here we only make sure that the sums are less than the
	 * total usable memory.
	 *
	 * The rotation memory allocation algorithm (for each plane):
	 *  a. If no more rotated or compressed planes exist, all remaining
	 *     rotation memory in the bank is available for use by the plane.
	 *  b. If other rotated or compressed planes exist, and the plane's
	 *     layer ID is DE_VIDEO1, it can use all the memory from the first
	 *     bank if a secondary rotation memory bank is available, otherwise
	 *     it can use up to half of the bank's memory.
	 *  c. If other rotated or compressed planes exist, and the plane's
	 *     layer ID is not DE_VIDEO1, it can use half of the available
	 *     memory.
	 *
	 * Note: this algorithm assumes that the order in which the planes are
	 * checked always has the DE_VIDEO1 plane first in the list if it is
	 * rotated. Because that is how we create the planes in the first
	 * place, this holds with the current DRM version, but if the order in
	 * which drm_atomic_crtc_state_for_each_plane() iterates over planes
	 * ever changes, we need to pre-sort the planes before validation.
	 */
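	/*
	 * Example: with two rotated planes and a second rotation memory bank
	 * present, DE_VIDEO1 is validated first and may use the whole of
	 * bank 0, leaving the whole of bank 1 for the remaining plane.
	 */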

	/* first count the number of rotated planes */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_framebuffer *fb = pstate->fb;

		if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier)
			rotated_planes++;
	}

	rot_mem_free = hwdev->rotation_memory[0];
	/*
	 * if we have more than 1 plane using rotation memory, use the second
	 * block of rotation memory as well
	 */
	if (rotated_planes > 1)
		rot_mem_free += hwdev->rotation_memory[1];

	/* now validate the rotation memory requirements */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct malidp_plane *mp = to_malidp_plane(plane);
		struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
		struct drm_framebuffer *fb = pstate->fb;

		if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier) {
			/* process current plane */
			rotated_planes--;

			if (!rotated_planes) {
				/* no more rotated planes, we can use what's left */
				rot_mem_usable = rot_mem_free;
			} else {
				if ((mp->layer->id != DE_VIDEO1) ||
				    (hwdev->rotation_memory[1] == 0))
					rot_mem_usable = rot_mem_free / 2;
				else
					rot_mem_usable = hwdev->rotation_memory[0];
			}

			rot_mem_free -= rot_mem_usable;

			if (ms->rotmem_size > rot_mem_usable)
				return -EINVAL;
		}
	}

	/* If only the writeback routing has changed, we don't need a modeset */
	if (state->connectors_changed) {
		u32 old_mask = crtc->state->connector_mask;
		u32 new_mask = state->connector_mask;

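		/*
		 * The XOR isolates the connectors whose routing changed; if
		 * that is only the memory writeback connector, the update can
		 * be done without a full modeset.
		 */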
		if ((old_mask ^ new_mask) ==
		    (1 << drm_connector_index(&malidp->mw_connector.base)))
			state->connectors_changed = false;
	}

	ret = malidp_crtc_atomic_check_gamma(crtc, state);
	ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state);
	ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state);

	return ret;
}

static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
	.mode_valid = malidp_crtc_mode_valid,
	.atomic_check = malidp_crtc_atomic_check,
	.atomic_enable = malidp_crtc_atomic_enable,
	.atomic_disable = malidp_crtc_atomic_disable,
};

static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct malidp_crtc_state *state, *old_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	old_state = to_malidp_crtc_state(crtc->state);
	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
	memcpy(state->gamma_coeffs, old_state->gamma_coeffs,
	       sizeof(state->gamma_coeffs));
	memcpy(state->coloradj_coeffs, old_state->coloradj_coeffs,
	       sizeof(state->coloradj_coeffs));
	memcpy(&state->scaler_config, &old_state->scaler_config,
	       sizeof(state->scaler_config));
	state->scaled_planes_mask = 0;

	return &state->base;
}

static void malidp_crtc_reset(struct drm_crtc *crtc)
{
	struct malidp_crtc_state *state = NULL;

	if (crtc->state) {
		state = to_malidp_crtc_state(crtc->state);
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
	}

	kfree(state);
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state) {
		crtc->state = &state->base;
		crtc->state->crtc = crtc;
	}
}

static void malidp_crtc_destroy_state(struct drm_crtc *crtc,
				      struct drm_crtc_state *state)
{
	struct malidp_crtc_state *mali_state = NULL;

	if (state) {
		mali_state = to_malidp_crtc_state(state);
		__drm_atomic_helper_crtc_destroy_state(state);
	}

	kfree(mali_state);
}

static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;

	malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
			     hwdev->hw->map.de_irq_map.vsync_irq);
	return 0;
}

static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;

	malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
			      hwdev->hw->map.de_irq_map.vsync_irq);
}

static const struct drm_crtc_funcs malidp_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = malidp_crtc_reset,
	.atomic_duplicate_state = malidp_crtc_duplicate_state,
	.atomic_destroy_state = malidp_crtc_destroy_state,
	.enable_vblank = malidp_crtc_enable_vblank,
	.disable_vblank = malidp_crtc_disable_vblank,
};

int malidp_crtc_init(struct drm_device *drm)
{
	struct malidp_drm *malidp = drm->dev_private;
	struct drm_plane *primary = NULL, *plane;
	int ret;

	ret = malidp_de_planes_init(drm);
	if (ret < 0) {
		DRM_ERROR("Failed to initialise planes\n");
		return ret;
	}

	drm_for_each_plane(plane, drm) {
		if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
			primary = plane;
			break;
		}
	}

	if (!primary) {
		DRM_ERROR("no primary plane found\n");
		return -EINVAL;
	}

	ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
					&malidp_crtc_funcs, NULL);
	if (ret)
		return ret;

	drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(&malidp->crtc, MALIDP_GAMMA_LUT_SIZE);
	/* No inverse-gamma: it is per-plane. */
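	/* Arguments: degamma LUT size (0), CTM supported, gamma LUT size. */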
	drm_crtc_enable_color_mgmt(&malidp->crtc, 0, true, MALIDP_GAMMA_LUT_SIZE);

	malidp_se_set_enh_coeffs(malidp->dev);

	return 0;
}