/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */


#include "mdp5_kms.h"
#include "mdp5_smp.h"


/* SMP - Shared Memory Pool
 *
 * These are shared between all the clients, where each plane in a
 * scanout buffer is an SMP client.  Ie. scanout of 3 plane I420 on
 * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
 *
 * Based on the size of the attached scanout buffer, a certain # of
 * blocks must be allocated to that client out of the shared pool.
 *
 * In some hw, some blocks are statically allocated for certain pipes
 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
 *
 * Each block that can be dynamically allocated is in one of these states:
 *
 *	free:
 *		The block is free.
 *
 *	pending:
 *		The block is allocated to some client and not free.
 *
 *	configured:
 *		The block is allocated to some client, and assigned to that
 *		client in MDP5_MDP_SMP_ALLOC registers.
 *
 *	inuse:
 *		The block is being actively used by a client.
 *
 * The updates happen in the following steps:
 *
 *  1) mdp5_smp_request():
 *     When plane scanout is setup, calculate the number of blocks needed
 *     per client, and request them.  Blocks neither inuse nor configured
 *     nor pending by any other client are added to the client's pending
 *     set.
 *     For shrinking, blocks in pending but not in configured can be freed
 *     directly, but those already in configured will be freed later by
 *     mdp5_smp_commit().
 *
 *  2) mdp5_smp_configure():
 *     As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
 *     are configured for the union(pending, inuse).
 *     Current pending is copied to configured.
 *     It is assumed that mdp5_smp_request() and mdp5_smp_configure() do
 *     not run concurrently for the same pipe.
 *
 *  3) mdp5_smp_commit():
 *     After the next vblank, copy configured -> inuse.  Optionally update
 *     MDP5_MDP_SMP_ALLOC registers if there are newly unused blocks.
 *
 *  4) mdp5_smp_release():
 *     Must be called after the pipe is disabled and no longer uses any SMB.
 *
 * On the next vblank after changes have been committed to hw, the
 * client's pending blocks become its in-use blocks (and no-longer
 * in-use blocks become available to other clients).
 *
 * btw, hurray for confusing overloaded acronyms!  :-/
 *
 * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1
 * should happen at (or before)? atomic->check().  And we'd need
 * an API to discard previous requests if an update is aborted or
 * test-only.
 *
 * TODO would perhaps be nice to have debugfs to dump out kernel
 * inuse and pending state of all clients..
 */
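
/* Worked example of the lifecycle above (block numbers are illustrative
 * only): suppose a pipe's client needs 4 blocks.  After step #1 its
 * pending set is {0,1,2,3} and those bits are taken in the global pool.
 * Step #2 programs the MDP5_MDP_SMP_ALLOC registers and copies pending
 * -> configured.  After the next vblank, step #3 copies configured ->
 * inuse.  If a later request shrinks the client to 2 blocks, the dropped
 * blocks are only returned to the pool after the following
 * configure/commit cycle (steps #2 and #3).
 */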

struct mdp5_smp {
	struct drm_device *dev;

	int blk_cnt;
	int blk_size;

	spinlock_t state_lock;
	mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */

	struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};

static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned);

static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{
	struct msm_drm_private *priv = smp->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
{
#define CID_UNUSED	0

	if (WARN_ON(plane >= pipe2nclients(pipe)))
		return CID_UNUSED;

	/*
	 * Note on SMP clients:
	 * For ViG pipes, the fetch clients for the Y/Cr/Cb components are
	 * always consecutive, and in that order.
	 *
	 * e.g.:
	 * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
	 *	Y  plane's client ID is N
	 *	Cr plane's client ID is N + 1
	 *	Cb plane's client ID is N + 2
	 */

	return mdp5_cfg->smp.clients[pipe] + plane;
}

/* step #1: update # of blocks pending for the client: */
static int smp_request_block(struct mdp5_smp *smp,
		u32 cid, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
	int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
	int reserved;
	unsigned long flags;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	reserved = hw_cfg->smp.reserved[cid];

	spin_lock_irqsave(&smp->state_lock, flags);

	if (reserved) {
		nblks = max(0, nblks - reserved);
		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
	}

	avail = cnt - bitmap_weight(smp->state, cnt);
	if (nblks > avail) {
		dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
				nblks, avail);
		ret = -ENOSPC;
		goto fail;
	}

	cur_nblks = bitmap_weight(ps->pending, cnt);
	if (nblks > cur_nblks) {
		/* grow the existing pending reservation: */
		for (i = cur_nblks; i < nblks; i++) {
			int blk = find_first_zero_bit(smp->state, cnt);
			set_bit(blk, ps->pending);
			set_bit(blk, smp->state);
		}
	} else {
		/* shrink the existing pending reservation: */
		for (i = cur_nblks; i > nblks; i--) {
			int blk = find_first_bit(ps->pending, cnt);
			clear_bit(blk, ps->pending);

			/* clear in global smp_state if not in configured,
			 * otherwise it will be freed later by mdp5_smp_commit()
			 */
			if (!test_bit(blk, ps->configured))
				clear_bit(blk, smp->state);
		}
	}

	ret = 0;
fail:
	spin_unlock_irqrestore(&smp->state_lock, flags);
	return ret;
}
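
/* Illustrative example (block numbers made up): with global state bits
 * {0,1,2} taken and a client holding pending = {0,1}, a request for
 * nblks = 3 grows the reservation by one block, taking the first free
 * bit (blk 3).  A later request for nblks = 1 shrinks it by clearing
 * blk 0 and blk 1 from pending; each is returned to the global pool
 * immediately only if it is not also in the client's configured set.
 */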

static void set_fifo_thresholds(struct mdp5_smp *smp,
		enum mdp5_pipe pipe, int nblks)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
	u32 val;

	/* 1/4 of SMP pool that is being fetched */
	val = (nblks * smp_entries_per_blk) / 4;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
}
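
/* Worked numbers (assuming a 4096-byte MMB): each SMP entry is 128 bits
 * = 16 bytes, so smp_entries_per_blk = 4096 / 16 = 256.  For nblks = 4,
 * val = (4 * 256) / 4 = 256, and the three watermarks are programmed at
 * 256, 512 and 768 entries (1/4, 2/4 and 3/4 of the client's allocation).
 */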

/*
 * NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	struct drm_device *dev = mdp5_kms->dev;
	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
	int i, hsub, nplanes, nlines, nblks, ret;

	nplanes = drm_format_num_planes(fmt);
	hsub = drm_format_horz_chroma_subsampling(fmt);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	for (i = 0, nblks = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(fmt, i);
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);

		/* for hw rev v1.00 */
		if (rev == 0)
			n = roundup_pow_of_two(n);

		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
		ret = smp_request_block(smp, pipe2client(pipe, i), n);
		if (ret) {
			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		nblks += n;
	}

	set_fifo_thresholds(smp, pipe, nblks);

	return 0;
}
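
/* E.g. a 1920-wide XRGB8888 scanout (single plane, cpp = 4), again
 * assuming a 4096-byte MMB: fetch_stride = 1920 * 4 = 7680 bytes, so
 * n = DIV_ROUND_UP(7680 * 2, 4096) = 4 blocks.  On hw rev v1.00 that
 * would then be rounded up to a power of two (already 4 here).
 */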

/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int i;
	unsigned long flags;
	int cnt = smp->blk_cnt;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		mdp5_smp_state_t assigned;
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		spin_lock_irqsave(&smp->state_lock, flags);

		/* clear hw assignment */
		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
		update_smp_state(smp, CID_UNUSED, &assigned);

		/* free to global pool */
		bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
		bitmap_andnot(smp->state, smp->state, assigned, cnt);

		/* clear client's info */
		bitmap_zero(ps->pending, cnt);
		bitmap_zero(ps->configured, cnt);
		bitmap_zero(ps->inuse, cnt);

		spin_unlock_irqrestore(&smp->state_lock, flags);
	}

	set_fifo_thresholds(smp, pipe, 0);
}

static void update_smp_state(struct mdp5_smp *smp,
		u32 cid, mdp5_smp_state_t *assigned)
{
	struct mdp5_kms *mdp5_kms = get_kms(smp);
	int cnt = smp->blk_cnt;
	u32 blk, val;

	for_each_set_bit(blk, *assigned, cnt) {
		int idx = blk / 3;
		int fld = blk % 3;

		val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));

		switch (fld) {
		case 0:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
			break;
		case 1:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
			break;
		case 2:
			val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
			val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
			break;
		}

		mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
		mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
	}
}
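
/* Each SMP_ALLOC register packs the client IDs of three consecutive
 * blocks, hence the blk / 3 and blk % 3 above.  E.g. (illustrative):
 * blk 7 lands in register index 2, field CLIENT1; assigning it does a
 * read-modify-write of the W register and mirrors the value to the
 * matching R register.
 */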

/* step #2: configure hw for union(pending, inuse): */
void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t assigned;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * if vblank has not happened since the last smp_configure,
		 * skip the configure for now
		 */
		if (!bitmap_equal(ps->inuse, ps->configured, cnt))
			continue;

		bitmap_copy(ps->configured, ps->pending, cnt);
		bitmap_or(assigned, ps->inuse, ps->configured, cnt);
		update_smp_state(smp, cid, &assigned);
	}
}

/* step #3: after vblank, copy configured -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	int cnt = smp->blk_cnt;
	mdp5_smp_state_t released;
	int i;

	for (i = 0; i < pipe2nclients(pipe); i++) {
		u32 cid = pipe2client(pipe, i);
		struct mdp5_client_smp_state *ps = &smp->client_state[cid];

		/*
		 * Figure out if there are any blocks we were previously
		 * using, which can be released and made available to other
		 * clients:
		 */
		if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
			unsigned long flags;

			spin_lock_irqsave(&smp->state_lock, flags);
			/* clear released blocks: */
			bitmap_andnot(smp->state, smp->state, released, cnt);
			spin_unlock_irqrestore(&smp->state_lock, flags);

			update_smp_state(smp, CID_UNUSED, &released);
		}

		bitmap_copy(ps->inuse, ps->configured, cnt);
	}
}
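
/* E.g. (illustrative): if a client shrank from inuse = {0,1,2,3} to
 * configured = {0,1}, then released = {2,3}: those bits are cleared in
 * the global pool and their ALLOC register fields handed back to
 * CID_UNUSED before inuse is updated to {0,1}.
 */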

void mdp5_smp_destroy(struct mdp5_smp *smp)
{
	kfree(smp);
}

struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
{
	struct mdp5_smp *smp = NULL;
	int ret;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (unlikely(!smp)) {
		ret = -ENOMEM;
		goto fail;
	}

	smp->dev = dev;
	smp->blk_cnt = cfg->mmb_count;
	smp->blk_size = cfg->mmb_size;

	/* statically tied MMBs cannot be re-allocated: */
	bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
	spin_lock_init(&smp->state_lock);

	return smp;
fail:
	if (smp)
		mdp5_smp_destroy(smp);

	return ERR_PTR(ret);
}