/*
 * Copyright (C) 2014 Free Electrons
 * Copyright (C) 2014 Atmel
 *
 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

#include "atmel_hlcdc_dc.h"

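/*
 * drm_flip_work release callback: drop the framebuffer reference held by the
 * flip (if any) and free the flip object.
 */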
static void
atmel_hlcdc_layer_fb_flip_release(struct drm_flip_work *work, void *val)
{
	struct atmel_hlcdc_layer_fb_flip *flip = val;

	if (flip->fb)
		drm_framebuffer_unreference(flip->fb);
	kfree(flip);
}

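/*
 * Free a flip object immediately, including its flip work task, without going
 * through the flip work queue.
 */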
static void
atmel_hlcdc_layer_fb_flip_destroy(struct atmel_hlcdc_layer_fb_flip *flip)
{
	if (flip->fb)
		drm_framebuffer_unreference(flip->fb);
	kfree(flip->task);
	kfree(flip);
}

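/*
 * Release the DMA descriptors still held by a flip and queue the flip on the
 * layer's flip work: the framebuffer unreference then happens from the flip
 * work task run on layer->wq.
 */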
static void
atmel_hlcdc_layer_fb_flip_release_queue(struct atmel_hlcdc_layer *layer,
					struct atmel_hlcdc_layer_fb_flip *flip)
{
	int i;

	if (!flip)
		return;

	for (i = 0; i < layer->max_planes; i++) {
		if (!flip->dscrs[i])
			break;

		flip->dscrs[i]->status = 0;
		flip->dscrs[i] = NULL;
	}

	drm_flip_work_queue_task(&layer->gc, flip->task);
	drm_flip_work_commit(&layer->gc, layer->wq);
}

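/*
 * Reset one of the two update slots: clear the updated-config bitmap and the
 * shadow config values, and release the slot's fb flip (if any).
 */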
static void atmel_hlcdc_layer_update_reset(struct atmel_hlcdc_layer *layer,
					   int id)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_update_slot *slot;

	if (id < 0 || id > 1)
		return;

	slot = &upd->slots[id];
	bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs);
	memset(slot->configs, 0,
	       sizeof(*slot->configs) * layer->desc->nconfigs);

	if (slot->fb_flip) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, slot->fb_flip);
		slot->fb_flip = NULL;
	}
}

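/*
 * Push the pending update slot to the hardware: write the touched config
 * registers, program the DMA descriptors of the new framebuffer (starting the
 * channel if the layer was disabled, or chaining them through the
 * add-to-queue mechanism if it is already running), then trigger the action
 * through the ATMEL_HLCDC_LAYER_CHER register.
 * Must be called with layer->lock held.
 */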
static void atmel_hlcdc_layer_update_apply(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_update_slot *slot;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_dma_channel_dscr *dscr;
	unsigned int cfg;
	u32 action = 0;
	int i = 0;

	if (upd->pending < 0 || upd->pending > 1)
		return;

	slot = &upd->slots[upd->pending];

	for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) {
		regmap_write(regmap,
			     desc->regs_offset +
			     ATMEL_HLCDC_LAYER_CFG(layer, cfg),
			     slot->configs[cfg]);
		action |= ATMEL_HLCDC_LAYER_UPDATE;
	}

	fb_flip = slot->fb_flip;

	if (!fb_flip->fb)
		goto apply;

	if (dma->status == ATMEL_HLCDC_LAYER_DISABLED) {
		for (i = 0; i < fb_flip->ngems; i++) {
			dscr = fb_flip->dscrs[i];
			dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
				     ATMEL_HLCDC_LAYER_DMA_IRQ |
				     ATMEL_HLCDC_LAYER_ADD_IRQ |
				     ATMEL_HLCDC_LAYER_DONE_IRQ;

			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
				     dscr->addr);
			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
				     dscr->ctrl);
			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
				     dscr->next);
		}

		action |= ATMEL_HLCDC_LAYER_DMA_CHAN;
		dma->status = ATMEL_HLCDC_LAYER_ENABLED;
	} else {
		for (i = 0; i < fb_flip->ngems; i++) {
			dscr = fb_flip->dscrs[i];
			dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
				     ATMEL_HLCDC_LAYER_DMA_IRQ |
				     ATMEL_HLCDC_LAYER_DSCR_IRQ |
				     ATMEL_HLCDC_LAYER_DONE_IRQ;

			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
				     dscr->next);
		}

		action |= ATMEL_HLCDC_LAYER_A2Q;
	}

	/* Release unneeded descriptors */
	for (i = fb_flip->ngems; i < layer->max_planes; i++) {
		fb_flip->dscrs[i]->status = 0;
		fb_flip->dscrs[i] = NULL;
	}

	dma->queue = fb_flip;
	slot->fb_flip = NULL;

apply:
	if (action)
		regmap_write(regmap,
			     desc->regs_offset + ATMEL_HLCDC_LAYER_CHER,
			     action);

	atmel_hlcdc_layer_update_reset(layer, upd->pending);

	upd->pending = -1;
}

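/*
 * Handle the layer interrupt status: update the per-plane descriptor status
 * bits (loaded/done/overrun), release the flips that have completed, reset
 * the channel on overrun, and apply the next pending update once the DMA
 * queue slot is free again.
 */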
void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_fb_flip *flip;
	unsigned long flags;
	unsigned int isr, imr;
	unsigned int status;
	unsigned int plane_status;
	u32 flip_status;

	int i;

	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr);
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
	status = imr & isr;
	if (!status)
		return;

	spin_lock_irqsave(&layer->lock, flags);

	flip = dma->queue ? dma->queue : dma->cur;

	if (!flip) {
		spin_unlock_irqrestore(&layer->lock, flags);
		return;
	}

	/*
	 * Set LOADED and DONE flags: they'll be cleared if at least one
	 * memory plane is not LOADED or DONE.
	 */
	flip_status = ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED |
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
	for (i = 0; i < flip->ngems; i++) {
		plane_status = (status >> (8 * i));

		if (plane_status &
		    (ATMEL_HLCDC_LAYER_ADD_IRQ |
		     ATMEL_HLCDC_LAYER_DSCR_IRQ) &
		    ~flip->dscrs[i]->ctrl) {
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
			flip->dscrs[i]->ctrl |=
					ATMEL_HLCDC_LAYER_ADD_IRQ |
					ATMEL_HLCDC_LAYER_DSCR_IRQ;
		}

		if (plane_status &
		    ATMEL_HLCDC_LAYER_DONE_IRQ &
		    ~flip->dscrs[i]->ctrl) {
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
			flip->dscrs[i]->ctrl |=
					ATMEL_HLCDC_LAYER_DONE_IRQ;
		}

		if (plane_status & ATMEL_HLCDC_LAYER_OVR_IRQ)
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;

		/*
		 * Clear LOADED and DONE flags if the memory plane is either
		 * not LOADED or not DONE.
		 */
		if (!(flip->dscrs[i]->status &
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED))
			flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;

		if (!(flip->dscrs[i]->status &
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE))
			flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;

		/*
		 * An overrun on one memory plane impacts the whole
		 * framebuffer transfer, hence we set the OVERRUN flag as
		 * soon as one memory plane reports such an overrun.
		 */
		flip_status |= flip->dscrs[i]->status &
			       ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
	}

	/* Get changed bits */
	flip_status ^= flip->status;
	flip->status |= flip_status;

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = dma->queue;
		dma->queue = NULL;
	}

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = NULL;
	}

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN) {
		regmap_write(regmap,
			     desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
			     ATMEL_HLCDC_LAYER_RST);
		if (dma->queue)
			atmel_hlcdc_layer_fb_flip_release_queue(layer,
								dma->queue);

		if (dma->cur)
			atmel_hlcdc_layer_fb_flip_release_queue(layer,
								dma->cur);

		dma->cur = NULL;
		dma->queue = NULL;
	}

	if (!dma->queue) {
		atmel_hlcdc_layer_update_apply(layer);

		if (!dma->cur)
			dma->status = ATMEL_HLCDC_LAYER_DISABLED;
	}

	spin_unlock_irqrestore(&layer->lock, flags);
}

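/*
 * Disable the layer: reset the DMA channel, clear pending interrupts, release
 * the current and queued framebuffer transfers and drop any pending update so
 * the IRQ handler cannot re-enable the channel afterwards.
 */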
int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	unsigned long flags;
	unsigned int isr;

	spin_lock_irqsave(&layer->lock, flags);

	/* Disable the layer */
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
		     ATMEL_HLCDC_LAYER_UPDATE);

	/* Clear all pending interrupts */
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);

	/* Discard current and queued framebuffer transfers. */
	if (dma->cur) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = NULL;
	}

	if (dma->queue) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue);
		dma->queue = NULL;
	}

	/*
	 * Then discard the pending update request (if any) to prevent the
	 * DMA irq handler from restarting the DMA channel after it has
	 * been disabled.
	 */
	if (upd->pending >= 0) {
		atmel_hlcdc_layer_update_reset(layer, upd->pending);
		upd->pending = -1;
	}

	dma->status = ATMEL_HLCDC_LAYER_DISABLED;

	spin_unlock_irqrestore(&layer->lock, flags);

	return 0;
}

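/*
 * Start a new update transaction on the layer: allocate a flip object and its
 * flip work task, reserve one free DMA descriptor per plane, and initialize
 * the "next" update slot, inheriting the configs and framebuffer of the
 * pending slot when there is one, or reading the current configs back from
 * the hardware otherwise.
 * Returns 0 on success, -ENOMEM or -EBUSY on failure.
 */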
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_layer_update_slot *slot;
	unsigned long flags;
	int i, j = 0;

	fb_flip = kzalloc(sizeof(*fb_flip), GFP_KERNEL);
	if (!fb_flip)
		return -ENOMEM;

	fb_flip->task = drm_flip_work_allocate_task(fb_flip, GFP_KERNEL);
	if (!fb_flip->task) {
		kfree(fb_flip);
		return -ENOMEM;
	}

	spin_lock_irqsave(&layer->lock, flags);

	upd->next = upd->pending ? 0 : 1;

	slot = &upd->slots[upd->next];

	for (i = 0; i < layer->max_planes * 4; i++) {
		if (!dma->dscrs[i].status) {
			fb_flip->dscrs[j++] = &dma->dscrs[i];
			dma->dscrs[i].status =
				ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED;
			if (j == layer->max_planes)
				break;
		}
	}

	/* Not enough free descriptors: release the reserved ones and bail out. */
	if (j < layer->max_planes) {
		for (i = 0; i < j; i++)
			fb_flip->dscrs[i]->status = 0;

		spin_unlock_irqrestore(&layer->lock, flags);
		atmel_hlcdc_layer_fb_flip_destroy(fb_flip);
		return -EBUSY;
	}

	slot->fb_flip = fb_flip;

	if (upd->pending >= 0) {
		memcpy(slot->configs,
		       upd->slots[upd->pending].configs,
		       layer->desc->nconfigs * sizeof(u32));
		memcpy(slot->updated_configs,
		       upd->slots[upd->pending].updated_configs,
		       DIV_ROUND_UP(layer->desc->nconfigs,
				    BITS_PER_BYTE * sizeof(unsigned long)) *
		       sizeof(unsigned long));
		slot->fb_flip->fb = upd->slots[upd->pending].fb_flip->fb;
		if (upd->slots[upd->pending].fb_flip->fb) {
			slot->fb_flip->ngems =
				upd->slots[upd->pending].fb_flip->ngems;
			drm_framebuffer_reference(slot->fb_flip->fb);
		}
	} else {
		regmap_bulk_read(regmap,
				 layer->desc->regs_offset +
				 ATMEL_HLCDC_LAYER_CFG(layer, 0),
				 upd->slots[upd->next].configs,
				 layer->desc->nconfigs);
	}

	spin_unlock_irqrestore(&layer->lock, flags);

	return 0;
}

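/*
 * Abort the update transaction started with
 * atmel_hlcdc_layer_update_start(): reset the "next" slot and release the
 * resources it holds.
 */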
void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;

	atmel_hlcdc_layer_update_reset(layer, upd->next);
	upd->next = -1;
}

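/*
 * Attach a framebuffer (or NULL) to the current update transaction: fill the
 * reserved DMA descriptors with the physical address of each memory plane,
 * take a reference on the new framebuffer and drop the reference on the
 * previous one.
 */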
void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
				     struct drm_framebuffer *fb,
				     unsigned int *offsets)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_layer_update_slot *slot;
	struct atmel_hlcdc_dma_channel_dscr *dscr;
	struct drm_framebuffer *old_fb;
	int nplanes = 0;
	int i;

	if (upd->next < 0 || upd->next > 1)
		return;

	if (fb)
		nplanes = drm_format_num_planes(fb->pixel_format);

	if (nplanes > layer->max_planes)
		return;

	slot = &upd->slots[upd->next];

	fb_flip = slot->fb_flip;
	old_fb = slot->fb_flip->fb;

	for (i = 0; i < nplanes; i++) {
		struct drm_gem_cma_object *gem;

		dscr = slot->fb_flip->dscrs[i];
		gem = drm_fb_cma_get_gem_obj(fb, i);
		dscr->addr = gem->paddr + offsets[i];
	}

	fb_flip->ngems = nplanes;
	fb_flip->fb = fb;

	if (fb)
		drm_framebuffer_reference(fb);

	if (old_fb)
		drm_framebuffer_unreference(old_fb);
}

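/*
 * Update the masked bits of one config register in the current update slot
 * and mark that config as touched so it gets written back to the hardware
 * when the update is committed.
 */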
void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
				  u32 mask, u32 val)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_update_slot *slot;

	if (upd->next < 0 || upd->next > 1)
		return;

	if (cfg >= layer->desc->nconfigs)
		return;

	slot = &upd->slots[upd->next];
	slot->configs[cfg] &= ~mask;
	slot->configs[cfg] |= (val & mask);
	set_bit(cfg, slot->updated_configs);
}

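/*
 * Commit the current update transaction: the "next" slot becomes the
 * "pending" slot (replacing a previously pending update, if any), and the
 * update is applied immediately when the DMA queue is free, or left for the
 * IRQ handler otherwise.
 */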
void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	unsigned long flags;

	if (upd->next < 0 || upd->next > 1)
		return;

	spin_lock_irqsave(&layer->lock, flags);

	/*
	 * Release the pending update request and replace it by the new one.
	 */
	if (upd->pending >= 0)
		atmel_hlcdc_layer_update_reset(layer, upd->pending);

	upd->pending = upd->next;
	upd->next = -1;

	if (!dma->queue)
		atmel_hlcdc_layer_update_apply(layer);

	spin_unlock_irqrestore(&layer->lock, flags);
}

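/*
 * Allocate the coherent DMA descriptor pool for this layer (4 descriptors per
 * memory plane). Each descriptor's "next" field is initialized to its own DMA
 * address.
 */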
static int atmel_hlcdc_layer_dma_init(struct drm_device *dev,
				      struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	dma_addr_t dma_addr;
	int i;

	dma->dscrs = dma_alloc_coherent(dev->dev,
					layer->max_planes * 4 *
					sizeof(*dma->dscrs),
					&dma_addr, GFP_KERNEL);
	if (!dma->dscrs)
		return -ENOMEM;

	for (i = 0; i < layer->max_planes * 4; i++) {
		struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

		dscr->next = dma_addr + (i * sizeof(*dscr));
	}

	return 0;
}

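/*
 * Release the layer's DMA descriptor pool. dma->dscrs[0].next holds the DMA
 * address of the first descriptor, i.e. the base address of the coherent
 * allocation.
 */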
static void atmel_hlcdc_layer_dma_cleanup(struct drm_device *dev,
					  struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	int i;

	for (i = 0; i < layer->max_planes * 4; i++) {
		struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

		dscr->status = 0;
	}

	dma_free_coherent(dev->dev, layer->max_planes * 4 *
			  sizeof(*dma->dscrs), dma->dscrs,
			  dma->dscrs[0].next);
}

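/*
 * Allocate the two update slots: a single buffer holds, for each slot, the
 * updated-config bitmap followed by the shadow copy of the config registers.
 */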
static int atmel_hlcdc_layer_update_init(struct drm_device *dev,
				struct atmel_hlcdc_layer *layer,
				const struct atmel_hlcdc_layer_desc *desc)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	int updated_size;
	void *buffer;
	int i;

	updated_size = DIV_ROUND_UP(desc->nconfigs,
				    BITS_PER_BYTE *
				    sizeof(unsigned long));

	buffer = devm_kzalloc(dev->dev,
			      ((desc->nconfigs * sizeof(u32)) +
			       (updated_size * sizeof(unsigned long))) * 2,
			      GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i = 0; i < 2; i++) {
		upd->slots[i].updated_configs = buffer;
		buffer += updated_size * sizeof(unsigned long);
		upd->slots[i].configs = buffer;
		buffer += desc->nconfigs * sizeof(u32);
	}

	upd->pending = -1;
	upd->next = -1;

	return 0;
}

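/*
 * Initialize an HLCDC layer: reset the channel, compute the maximum number of
 * memory planes from the supported formats, set up the flip work, DMA
 * descriptors and update slots, then clear and enable the layer interrupts.
 */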
int atmel_hlcdc_layer_init(struct drm_device *dev,
			   struct atmel_hlcdc_layer *layer,
			   const struct atmel_hlcdc_layer_desc *desc)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	struct regmap *regmap = dc->hlcdc->regmap;
	unsigned int tmp;
	int ret;
	int i;

	layer->hlcdc = dc->hlcdc;
	layer->wq = dc->wq;
	layer->desc = desc;

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST);
	for (i = 0; i < desc->formats->nformats; i++) {
		int nplanes = drm_format_num_planes(desc->formats->formats[i]);

		if (nplanes > layer->max_planes)
			layer->max_planes = nplanes;
	}

	spin_lock_init(&layer->lock);
	drm_flip_work_init(&layer->gc, desc->name,
			   atmel_hlcdc_layer_fb_flip_release);
	ret = atmel_hlcdc_layer_dma_init(dev, layer);
	if (ret)
		return ret;

	ret = atmel_hlcdc_layer_update_init(dev, layer, desc);
	if (ret)
		return ret;

	/* Flush Status Register */
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
		     0xffffffff);
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR,
		    &tmp);

	tmp = 0;
	for (i = 0; i < layer->max_planes; i++)
		tmp |= (ATMEL_HLCDC_LAYER_DMA_IRQ |
			ATMEL_HLCDC_LAYER_DSCR_IRQ |
			ATMEL_HLCDC_LAYER_ADD_IRQ |
			ATMEL_HLCDC_LAYER_DONE_IRQ |
			ATMEL_HLCDC_LAYER_OVR_IRQ) << (8 * i);

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp);

	return 0;
}

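/*
 * Tear down an HLCDC layer: mask its interrupts, reset the DMA channel and
 * free the DMA descriptors and flip work.
 */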
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
			       struct atmel_hlcdc_layer *layer)
{
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct regmap *regmap = layer->hlcdc->regmap;

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
		     0xffffffff);
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST);

	atmel_hlcdc_layer_dma_cleanup(dev, layer);
	drm_flip_work_cleanup(&layer->gc);
}