UBUNTU: SAUCE: IPU6 driver release for kernel 5.13
drivers/media/pci/intel/ipu-psys.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013 - 2020 Intel Corporation

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/init_task.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/version.h>
#include <linux/poll.h>
#include <uapi/linux/sched/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>

#include <uapi/linux/ipu-psys.h>

#include "ipu.h"
#include "ipu-mmu.h"
#include "ipu-bus.h"
#include "ipu-platform.h"
#include "ipu-buttress.h"
#include "ipu-cpd.h"
#include "ipu-fw-psys.h"
#include "ipu-psys.h"
#include "ipu-platform-psys.h"
#include "ipu-platform-regs.h"
#include "ipu-fw-com.h"

static bool async_fw_init;
module_param(async_fw_init, bool, 0664);
MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization");

#define IPU_PSYS_NUM_DEVICES		4
#define IPU_PSYS_AUTOSUSPEND_DELAY	2000

#ifdef CONFIG_PM
static int psys_runtime_pm_resume(struct device *dev);
static int psys_runtime_pm_suspend(struct device *dev);
#else
#define pm_runtime_dont_use_autosuspend(d)
#define pm_runtime_use_autosuspend(d)
#define pm_runtime_set_autosuspend_delay(d, f)	0
#define pm_runtime_get_sync(d)			0
#define pm_runtime_put(d)			0
#define pm_runtime_put_sync(d)			0
#define pm_runtime_put_noidle(d)		0
#define pm_runtime_put_autosuspend(d)		0
#endif

static dev_t ipu_psys_dev_t;
static DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES);
static DEFINE_MUTEX(ipu_psys_mutex);

static struct fw_init_task {
	struct delayed_work work;
	struct ipu_psys *psys;
} fw_init_task;

static void ipu_psys_remove(struct ipu_bus_device *adev);

static struct bus_type ipu_psys_bus = {
	.name = IPU_PSYS_NAME,
};

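/*
 * Reuse a free process-group buffer from the pool if one is large
 * enough; otherwise allocate a new DMA buffer and add it to the pool.
 * Returns NULL on allocation failure.
 */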
struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size)
{
	struct ipu_psys_pg *kpg;
	unsigned long flags;

	spin_lock_irqsave(&psys->pgs_lock, flags);
	list_for_each_entry(kpg, &psys->pgs, list) {
		if (!kpg->pg_size && kpg->size >= pg_size) {
			kpg->pg_size = pg_size;
			spin_unlock_irqrestore(&psys->pgs_lock, flags);
			return kpg;
		}
	}
	spin_unlock_irqrestore(&psys->pgs_lock, flags);
	/* no sufficiently large buffer available, allocate a new one */
	kpg = kzalloc(sizeof(*kpg), GFP_KERNEL);
	if (!kpg)
		return NULL;

	kpg->pg = dma_alloc_attrs(&psys->adev->dev, pg_size,
				  &kpg->pg_dma_addr, GFP_KERNEL, 0);
	if (!kpg->pg) {
		kfree(kpg);
		return NULL;
	}

	kpg->pg_size = pg_size;
	kpg->size = pg_size;
	spin_lock_irqsave(&psys->pgs_lock, flags);
	list_add(&kpg->list, &psys->pgs);
	spin_unlock_irqrestore(&psys->pgs_lock, flags);

	return kpg;
}

static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh,
				    struct ipu_psys_kbuffer *kbuf);

struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd)
{
	struct ipu_psys_kbuffer *kbuf;

	list_for_each_entry(kbuf, &fh->bufmap, list) {
		if (kbuf->fd == fd)
			return kbuf;
	}

	return NULL;
}

struct ipu_psys_kbuffer *
ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr)
{
	struct ipu_psys_kbuffer *kbuffer;

	list_for_each_entry(kbuffer, &fh->bufmap, list) {
		if (kbuffer->kaddr == kaddr)
			return kbuffer;
	}

	return NULL;
}

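/*
 * Pin the user pages backing attach->userptr and build a scatter-gather
 * table for them. VM_IO/VM_PFNMAP mappings are resolved page by page
 * with follow_pfn(); ordinary mappings are pinned via get_user_pages().
 */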
static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach)
{
	struct vm_area_struct *vma;
	unsigned long start, end;
	int npages, array_size;
	struct page **pages;
	struct sg_table *sgt;
	int nr = 0;
	int ret = -ENOMEM;

	start = (unsigned long)attach->userptr;
	end = PAGE_ALIGN(start + attach->len);
	npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT;
	array_size = npages * sizeof(struct page *);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return -ENOMEM;

	if (attach->npages != 0) {
		pages = attach->pages;
		npages = attach->npages;
		attach->vma_is_io = 1;
		goto skip_pages;
	}

	pages = kvzalloc(array_size, GFP_KERNEL);
	if (!pages)
		goto free_sgt;

	mmap_read_lock(current->mm);
	vma = find_vma(current->mm, start);
	if (!vma) {
		ret = -EFAULT;
		goto error_up_read;
	}

	/*
	 * For buffers from Gralloc, VM_PFNMAP is expected, but VM_IO is
	 * set instead. Possibly a bug in Gralloc.
	 */
	attach->vma_is_io = vma->vm_flags & (VM_IO | VM_PFNMAP);

	if (attach->vma_is_io) {
		unsigned long io_start = start;

		if (vma->vm_end < start + attach->len) {
			dev_err(attach->dev,
				"vma at %lu is too small for %llu bytes\n",
				start, attach->len);
			ret = -EFAULT;
			goto error_up_read;
		}

		for (nr = 0; nr < npages; nr++, io_start += PAGE_SIZE) {
			unsigned long pfn;

			ret = follow_pfn(vma, io_start, &pfn);
			if (ret)
				goto error_up_read;
			pages[nr] = pfn_to_page(pfn);
		}
	} else {
		nr = get_user_pages(start & PAGE_MASK, npages,
				    FOLL_WRITE,
				    pages, NULL);
		if (nr < npages)
			goto error_up_read;
	}
	mmap_read_unlock(current->mm);

	attach->pages = pages;
	attach->npages = npages;

skip_pages:
	ret = sg_alloc_table_from_pages(sgt, pages, npages,
					start & ~PAGE_MASK, attach->len,
					GFP_KERNEL);
	if (ret < 0)
		goto error;

	attach->sgt = sgt;

	return 0;

error_up_read:
	mmap_read_unlock(current->mm);
error:
	if (!attach->vma_is_io)
		while (nr > 0)
			put_page(pages[--nr]);

	/* pages came from kvzalloc(); kvfree() picks the right free path */
	kvfree(pages);
free_sgt:
	kfree(sgt);

	dev_err(attach->dev, "failed to get userpages: %d\n", ret);

	return ret;
}

static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach)
{
	if (!attach || !attach->userptr || !attach->sgt)
		return;

	if (!attach->vma_is_io) {
		int i = attach->npages;

		while (--i >= 0) {
			set_page_dirty_lock(attach->pages[i]);
			put_page(attach->pages[i]);
		}
	}

	kvfree(attach->pages);

	sg_free_table(attach->sgt);
	kfree(attach->sgt);
	attach->sgt = NULL;
}

static int ipu_dma_buf_attach(struct dma_buf *dbuf,
			      struct dma_buf_attachment *attach)
{
	struct ipu_psys_kbuffer *kbuf = dbuf->priv;
	struct ipu_dma_buf_attach *ipu_attach;

	ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL);
	if (!ipu_attach)
		return -ENOMEM;

	ipu_attach->len = kbuf->len;
	ipu_attach->userptr = kbuf->userptr;

	attach->priv = ipu_attach;
	return 0;
}

static void ipu_dma_buf_detach(struct dma_buf *dbuf,
			       struct dma_buf_attachment *attach)
{
	struct ipu_dma_buf_attach *ipu_attach = attach->priv;

	kfree(ipu_attach);
	attach->priv = NULL;
}

static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct ipu_dma_buf_attach *ipu_attach = attach->priv;
	unsigned long attrs;
	int ret;

	ret = ipu_psys_get_userpages(ipu_attach);
	if (ret)
		return NULL;

	attrs = DMA_ATTR_SKIP_CPU_SYNC;
	ret = dma_map_sg_attrs(attach->dev, ipu_attach->sgt->sgl,
			       ipu_attach->sgt->orig_nents, dir, attrs);
	if (ret < ipu_attach->sgt->orig_nents) {
		ipu_psys_put_userpages(ipu_attach);
		dev_dbg(attach->dev, "buf map failed\n");

		return ERR_PTR(-EIO);
	}

	/*
	 * Initial cache flush to avoid writing dirty pages for buffers which
	 * are later marked as IPU_BUFFER_FLAG_NO_FLUSH.
	 */
	dma_sync_sg_for_device(attach->dev, ipu_attach->sgt->sgl,
			       ipu_attach->sgt->orig_nents, DMA_BIDIRECTIONAL);

	return ipu_attach->sgt;
}

static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach,
			      struct sg_table *sg, enum dma_data_direction dir)
{
	struct ipu_dma_buf_attach *ipu_attach = attach->priv;

	dma_unmap_sg(attach->dev, sg->sgl, sg->orig_nents, dir);
	ipu_psys_put_userpages(ipu_attach);
}

static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
{
	return -ENOTTY;
}

static void ipu_dma_buf_release(struct dma_buf *buf)
{
	struct ipu_psys_kbuffer *kbuf = buf->priv;

	if (!kbuf)
		return;

	if (kbuf->db_attach) {
		dev_dbg(kbuf->db_attach->dev,
			"releasing buffer %d\n", kbuf->fd);
		ipu_psys_put_userpages(kbuf->db_attach->priv);
	}
	kfree(kbuf);
}

static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					enum dma_data_direction dir)
{
	return -ENOTTY;
}

static int ipu_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct dma_buf_attachment *attach;
	struct ipu_dma_buf_attach *ipu_attach;

	if (list_empty(&dmabuf->attachments))
		return -EINVAL;

	attach = list_last_entry(&dmabuf->attachments,
				 struct dma_buf_attachment, node);
	ipu_attach = attach->priv;

	if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)
		return -EINVAL;

	map->vaddr = vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0);
	map->is_iomem = false;
	if (!map->vaddr)
		return -EINVAL;

	return 0;
}

static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct dma_buf_attachment *attach;
	struct ipu_dma_buf_attach *ipu_attach;

	if (WARN_ON(list_empty(&dmabuf->attachments)))
		return;

	attach = list_last_entry(&dmabuf->attachments,
				 struct dma_buf_attachment, node);
	ipu_attach = attach->priv;

	if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages))
		return;

	vm_unmap_ram(map->vaddr, ipu_attach->npages);
}

struct dma_buf_ops ipu_dma_buf_ops = {
	.attach = ipu_dma_buf_attach,
	.detach = ipu_dma_buf_detach,
	.map_dma_buf = ipu_dma_buf_map,
	.unmap_dma_buf = ipu_dma_buf_unmap,
	.release = ipu_dma_buf_release,
	.begin_cpu_access = ipu_dma_buf_begin_cpu_access,
	.mmap = ipu_dma_buf_mmap,
	.vmap = ipu_dma_buf_vmap,
	.vunmap = ipu_dma_buf_vunmap,
};

static int ipu_psys_open(struct inode *inode, struct file *file)
{
	struct ipu_psys *psys = inode_to_ipu_psys(inode);
	struct ipu_device *isp = psys->adev->isp;
	struct ipu_psys_fh *fh;
	int rval;

	if (isp->flr_done)
		return -EIO;

	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	if (!fh)
		return -ENOMEM;

	fh->psys = psys;

	file->private_data = fh;

	mutex_init(&fh->mutex);
	INIT_LIST_HEAD(&fh->bufmap);
	init_waitqueue_head(&fh->wait);

	rval = ipu_psys_fh_init(fh);
	if (rval)
		goto open_failed;

	mutex_lock(&psys->mutex);
	list_add_tail(&fh->list, &psys->fhs);
	mutex_unlock(&psys->mutex);

	return 0;

open_failed:
	mutex_destroy(&fh->mutex);
	kfree(fh);
	return rval;
}

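/*
 * Tear down all kernel-side mappings of a buffer: the kernel vmap, the
 * DMA attachment mapping and the dmabuf attachment itself, then drop
 * the dmabuf reference. Freeing the kbuf is left to the caller.
 */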
static inline void ipu_psys_kbuf_unmap(struct ipu_psys_kbuffer *kbuf)
{
	if (!kbuf)
		return;

	kbuf->valid = false;
	if (kbuf->kaddr) {
		struct dma_buf_map dmap;

		dma_buf_map_set_vaddr(&dmap, kbuf->kaddr);
		dma_buf_vunmap(kbuf->dbuf, &dmap);
	}
	if (kbuf->sgt)
		dma_buf_unmap_attachment(kbuf->db_attach,
					 kbuf->sgt,
					 DMA_BIDIRECTIONAL);
	if (kbuf->db_attach)
		dma_buf_detach(kbuf->dbuf, kbuf->db_attach);
	dma_buf_put(kbuf->dbuf);

	kbuf->db_attach = NULL;
	kbuf->dbuf = NULL;
	kbuf->sgt = NULL;
}

static int ipu_psys_release(struct inode *inode, struct file *file)
{
	struct ipu_psys *psys = inode_to_ipu_psys(inode);
	struct ipu_psys_fh *fh = file->private_data;
	struct ipu_psys_kbuffer *kbuf, *kbuf0;
	struct dma_buf_attachment *db_attach;

	mutex_lock(&fh->mutex);
	/* clean up buffers */
	if (!list_empty(&fh->bufmap)) {
		list_for_each_entry_safe(kbuf, kbuf0, &fh->bufmap, list) {
			list_del(&kbuf->list);
			db_attach = kbuf->db_attach;

			/* Unmap and release buffers */
			if (kbuf->dbuf && db_attach) {
				ipu_psys_kbuf_unmap(kbuf);
			} else {
				if (db_attach)
					ipu_psys_put_userpages(db_attach->priv);
				kfree(kbuf);
			}
		}
	}
	mutex_unlock(&fh->mutex);

	mutex_lock(&psys->mutex);
	list_del(&fh->list);
	mutex_unlock(&psys->mutex);

	ipu_psys_fh_deinit(fh);

	mutex_lock(&psys->mutex);
	if (list_empty(&psys->fhs))
		psys->power_gating = 0;
	mutex_unlock(&psys->mutex);
	mutex_destroy(&fh->mutex);
	kfree(fh);

	return 0;
}

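/*
 * IPU_IOC_GETBUF: wrap a userspace buffer (base.userptr) in a dmabuf
 * exported by this driver and return the new fd to the caller.
 *
 * A minimal sketch of the expected userspace sequence (assuming a
 * /dev/ipu-psys0 node opened as devfd; struct fields as defined in
 * uapi/linux/ipu-psys.h):
 *
 *	struct ipu_psys_buffer buf = {
 *		.len = size,
 *		.base.userptr = ptr,
 *	};
 *	ioctl(devfd, IPU_IOC_GETBUF, &buf);	// buf.base.fd is now valid
 *	ioctl(devfd, IPU_IOC_MAPBUF, buf.base.fd);
 */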
static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh)
{
	struct ipu_psys_kbuffer *kbuf;
	struct ipu_psys *psys = fh->psys;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dbuf;
	int ret;

	if (!buf->base.userptr) {
		dev_err(&psys->adev->dev, "Buffer allocation not supported\n");
		return -EINVAL;
	}

	kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	kbuf->len = buf->len;
	kbuf->userptr = buf->base.userptr;
	kbuf->flags = buf->flags;

	exp_info.ops = &ipu_dma_buf_ops;
	exp_info.size = kbuf->len;
	exp_info.flags = O_RDWR;
	exp_info.priv = kbuf;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf)) {
		kfree(kbuf);
		return PTR_ERR(dbuf);
	}

	ret = dma_buf_fd(dbuf, 0);
	if (ret < 0) {
		kfree(kbuf);
		dma_buf_put(dbuf);
		return ret;
	}

	kbuf->fd = ret;
	buf->base.fd = ret;
	buf->flags &= ~IPU_BUFFER_FLAG_USERPTR;
	buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE;
	kbuf->flags = buf->flags;

	mutex_lock(&fh->mutex);
	list_add(&kbuf->list, &fh->bufmap);
	mutex_unlock(&fh->mutex);

	dev_dbg(&psys->adev->dev, "IOC_GETBUF: userptr %p size %llu to fd %d\n",
		buf->base.userptr, buf->len, buf->base.fd);

	return 0;
}

static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh)
{
	return 0;
}

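/*
 * Map a dmabuf fd for IPU use: attach, map the attachment for DMA and
 * vmap it for CPU access. Called with fh->mutex held; handles both fds
 * created by IPU_IOC_GETBUF and externally created dmabuf fds.
 */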
int ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh,
			   struct ipu_psys_kbuffer *kbuf)
{
	struct ipu_psys *psys = fh->psys;
	struct dma_buf *dbuf;
	struct dma_buf_map dmap;
	int ret;

	dbuf = dma_buf_get(fd);
	if (IS_ERR(dbuf))
		return -EINVAL;

	if (!kbuf) {
		/*
		 * This fd was not generated by ipu_psys_getbuf; it is a
		 * new, external fd. Create a new kbuf item for it and add
		 * the kbuf to the bufmap list.
		 */
		kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
		if (!kbuf) {
			ret = -ENOMEM;
			goto mapbuf_fail;
		}

		list_add(&kbuf->list, &fh->bufmap);
	}

	/* fd valid and found, but the underlying dmabuf changed: remap */
	if (kbuf->dbuf && (kbuf->dbuf != dbuf || kbuf->len != dbuf->size)) {
		dev_dbg(&psys->adev->dev,
			"dmabuf fd %d with kbuf %p changed, need remap.\n",
			fd, kbuf);
		ret = ipu_psys_unmapbuf_locked(fd, fh, kbuf);
		if (ret)
			goto mapbuf_fail;

		kbuf = ipu_psys_lookup_kbuffer(fh, fd);
		/* changed external dmabuf */
		if (!kbuf) {
			kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
			if (!kbuf) {
				ret = -ENOMEM;
				goto mapbuf_fail;
			}
			list_add(&kbuf->list, &fh->bufmap);
		}
	}

	if (kbuf->sgt) {
		dev_dbg(&psys->adev->dev, "fd %d has been mapped!\n", fd);
		dma_buf_put(dbuf);
		goto mapbuf_end;
	}

	kbuf->dbuf = dbuf;

	if (kbuf->len == 0)
		kbuf->len = kbuf->dbuf->size;

	kbuf->fd = fd;

	kbuf->db_attach = dma_buf_attach(kbuf->dbuf, &psys->adev->dev);
	if (IS_ERR(kbuf->db_attach)) {
		ret = PTR_ERR(kbuf->db_attach);
		dev_dbg(&psys->adev->dev, "dma buf attach failed\n");
		goto kbuf_map_fail;
	}

	kbuf->sgt = dma_buf_map_attachment(kbuf->db_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(kbuf->sgt)) {
		ret = -EINVAL;
		kbuf->sgt = NULL;
		dev_dbg(&psys->adev->dev, "dma buf map attachment failed\n");
		goto kbuf_map_fail;
	}

	kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl);

	ret = dma_buf_vmap(kbuf->dbuf, &dmap);
	if (ret) {
		dev_dbg(&psys->adev->dev, "dma buf vmap failed\n");
		goto kbuf_map_fail;
	}
	kbuf->kaddr = dmap.vaddr;

	dev_dbg(&psys->adev->dev, "%s kbuf %p fd %d with len %llu mapped\n",
		__func__, kbuf, fd, kbuf->len);
mapbuf_end:
	kbuf->valid = true;

	return 0;

kbuf_map_fail:
	ipu_psys_kbuf_unmap(kbuf);

	list_del(&kbuf->list);
	if (!kbuf->userptr)
		kfree(kbuf);
	return ret;

mapbuf_fail:
	dma_buf_put(dbuf);

	dev_err(&psys->adev->dev, "%s failed for fd %d\n", __func__, fd);
	return ret;
}

static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh)
{
	long ret;
	struct ipu_psys_kbuffer *kbuf;

	mutex_lock(&fh->mutex);
	kbuf = ipu_psys_lookup_kbuffer(fh, fd);
	ret = ipu_psys_mapbuf_locked(fd, fh, kbuf);
	mutex_unlock(&fh->mutex);

	dev_dbg(&fh->psys->adev->dev, "IOC_MAPBUF ret %ld\n", ret);

	return ret;
}

static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh,
				    struct ipu_psys_kbuffer *kbuf)
{
	struct ipu_psys *psys = fh->psys;

	if (!kbuf || fd != kbuf->fd) {
		dev_err(&psys->adev->dev, "invalid kbuffer\n");
		return -EINVAL;
	}

	/* From now on it is not safe to use this kbuffer */
	ipu_psys_kbuf_unmap(kbuf);

	list_del(&kbuf->list);

	if (!kbuf->userptr)
		kfree(kbuf);

	dev_dbg(&psys->adev->dev, "%s fd %d unmapped\n", __func__, fd);

	return 0;
}

static long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh)
{
	struct ipu_psys_kbuffer *kbuf;
	long ret;

	mutex_lock(&fh->mutex);
	kbuf = ipu_psys_lookup_kbuffer(fh, fd);
	if (!kbuf) {
		dev_err(&fh->psys->adev->dev,
			"buffer with fd %d not found\n", fd);
		mutex_unlock(&fh->mutex);
		return -EINVAL;
	}
	ret = ipu_psys_unmapbuf_locked(fd, fh, kbuf);
	mutex_unlock(&fh->mutex);

	dev_dbg(&fh->psys->adev->dev, "IOC_UNMAPBUF\n");

	return ret;
}

static unsigned int ipu_psys_poll(struct file *file,
				  struct poll_table_struct *wait)
{
	struct ipu_psys_fh *fh = file->private_data;
	struct ipu_psys *psys = fh->psys;
	unsigned int res = 0;

	dev_dbg(&psys->adev->dev, "ipu psys poll\n");

	poll_wait(file, &fh->wait, wait);

	if (ipu_get_completed_kcmd(fh))
		res = POLLIN;

	dev_dbg(&psys->adev->dev, "ipu psys poll res %u\n", res);

	return res;
}

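/*
 * IPU_IOC_GET_MANIFEST: look up a client package in the firmware
 * package directory and copy its program-group manifest to userspace.
 * A NULL manifest pointer only queries the size.
 */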
static long ipu_get_manifest(struct ipu_psys_manifest *manifest,
			     struct ipu_psys_fh *fh)
{
	struct ipu_psys *psys = fh->psys;
	struct ipu_device *isp = psys->adev->isp;
	struct ipu_cpd_client_pkg_hdr *client_pkg;
	u32 entries;
	void *host_fw_data;
	dma_addr_t dma_fw_data;
	u32 client_pkg_offset;

	host_fw_data = (void *)isp->cpd_fw->data;
	dma_fw_data = sg_dma_address(psys->fw_sgt.sgl);

	entries = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir);
	/* use ">= entries" to avoid the u32 underflow of "entries - 1" */
	if (!manifest || !entries || manifest->index >= entries) {
		dev_err(&psys->adev->dev, "invalid argument\n");
		return -EINVAL;
	}

	if (!ipu_cpd_pkg_dir_get_size(psys->pkg_dir, manifest->index) ||
	    ipu_cpd_pkg_dir_get_type(psys->pkg_dir, manifest->index) <
	    IPU_CPD_PKG_DIR_CLIENT_PG_TYPE) {
		dev_dbg(&psys->adev->dev, "invalid pkg dir entry\n");
		return -ENOENT;
	}

	client_pkg_offset = ipu_cpd_pkg_dir_get_address(psys->pkg_dir,
							manifest->index);
	client_pkg_offset -= dma_fw_data;

	client_pkg = host_fw_data + client_pkg_offset;
	manifest->size = client_pkg->pg_manifest_size;

	/* a NULL user pointer means the caller only wants the size */
	if (!manifest->manifest)
		return 0;

	if (copy_to_user(manifest->manifest,
			 (uint8_t *)client_pkg + client_pkg->pg_manifest_offs,
			 manifest->size)) {
		return -EFAULT;
	}

	return 0;
}

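/*
 * Ioctl dispatcher: arguments are copied in and out of the karg union
 * here, so the per-command handlers work on kernel copies only.
 * MAPBUF/UNMAPBUF pass the fd in arg directly and skip the copy.
 */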
static long ipu_psys_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	union {
		struct ipu_psys_buffer buf;
		struct ipu_psys_command cmd;
		struct ipu_psys_event ev;
		struct ipu_psys_capability caps;
		struct ipu_psys_manifest m;
	} karg;
	struct ipu_psys_fh *fh = file->private_data;
	long err = 0;
	void __user *up = (void __user *)arg;
	bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF);

	if (copy) {
		if (_IOC_SIZE(cmd) > sizeof(karg))
			return -ENOTTY;

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			err = copy_from_user(&karg, up, _IOC_SIZE(cmd));
			if (err)
				return -EFAULT;
		}
	}

	switch (cmd) {
	case IPU_IOC_MAPBUF:
		err = ipu_psys_mapbuf(arg, fh);
		break;
	case IPU_IOC_UNMAPBUF:
		err = ipu_psys_unmapbuf(arg, fh);
		break;
	case IPU_IOC_QUERYCAP:
		karg.caps = fh->psys->caps;
		break;
	case IPU_IOC_GETBUF:
		err = ipu_psys_getbuf(&karg.buf, fh);
		break;
	case IPU_IOC_PUTBUF:
		err = ipu_psys_putbuf(&karg.buf, fh);
		break;
	case IPU_IOC_QCMD:
		err = ipu_psys_kcmd_new(&karg.cmd, fh);
		break;
	case IPU_IOC_DQEVENT:
		err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags);
		break;
	case IPU_IOC_GET_MANIFEST:
		err = ipu_get_manifest(&karg.m, fh);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	if (err)
		return err;

	if (copy && _IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(up, &karg, _IOC_SIZE(cmd)))
			return -EFAULT;

	return 0;
}

static const struct file_operations ipu_psys_fops = {
	.open = ipu_psys_open,
	.release = ipu_psys_release,
	.unlocked_ioctl = ipu_psys_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ipu_psys_compat_ioctl32,
#endif
	.poll = ipu_psys_poll,
	.owner = THIS_MODULE,
};

static void ipu_psys_dev_release(struct device *dev)
{
}

#ifdef CONFIG_PM
static int psys_runtime_pm_resume(struct device *dev)
{
	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	unsigned long flags;
	int retval;

	if (!psys)
		return 0;

	/*
	 * In runtime autosuspend mode, if psys is already powered on,
	 * there is no need to resume it again.
	 */
	spin_lock_irqsave(&psys->ready_lock, flags);
	if (psys->ready) {
		spin_unlock_irqrestore(&psys->ready_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&psys->ready_lock, flags);

	retval = ipu_mmu_hw_init(adev->mmu);
	if (retval)
		return retval;

	if (async_fw_init && !psys->fwcom) {
		dev_err(dev,
			"%s: asynchronous firmware init not finished, skipping\n",
			__func__);
		return 0;
	}

	if (!ipu_buttress_auth_done(adev->isp)) {
		dev_dbg(dev, "%s: not yet authenticated, skipping\n", __func__);
		return 0;
	}

	ipu_psys_setup_hw(psys);

	ipu_psys_subdomains_power(psys, 1);
	ipu_trace_restore(&psys->adev->dev);

	ipu_configure_spc(adev->isp,
			  &psys->pdata->ipdata->hw_variant,
			  IPU_CPD_PKG_DIR_PSYS_SERVER_IDX,
			  psys->pdata->base, psys->pkg_dir,
			  psys->pkg_dir_dma_addr);

	retval = ipu_fw_psys_open(psys);
	if (retval) {
		dev_err(&psys->adev->dev, "Failed to open abi.\n");
		return retval;
	}

	spin_lock_irqsave(&psys->ready_lock, flags);
	psys->ready = 1;
	spin_unlock_irqrestore(&psys->ready_lock, flags);

	return 0;
}

static int psys_runtime_pm_suspend(struct device *dev)
{
	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	unsigned long flags;
	int rval;

	if (!psys)
		return 0;

	if (!psys->ready)
		return 0;

	spin_lock_irqsave(&psys->ready_lock, flags);
	psys->ready = 0;
	spin_unlock_irqrestore(&psys->ready_lock, flags);

	/*
	 * We can log a failure here, but it is better not to return an
	 * error: at suspend time we are progressing toward the psys
	 * power-gated state, and any hang or failure inside psys will be
	 * cleared by the power cycle anyway.
	 */
	rval = ipu_fw_psys_close(psys);
	if (rval)
		dev_err(dev, "Device close failure: %d\n", rval);

	ipu_psys_subdomains_power(psys, 0);

	ipu_mmu_hw_cleanup(adev->mmu);

	return 0;
}

/*
 * The following PM callbacks are needed to enable runtime PM in IPU PCI
 * device resume; without them, runtime PM cannot work when the PCI
 * device resumes from the S3 state.
 */
static int psys_resume(struct device *dev)
{
	return 0;
}

static int psys_suspend(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops psys_pm_ops = {
	.runtime_suspend = psys_runtime_pm_suspend,
	.runtime_resume = psys_runtime_pm_resume,
	.suspend = psys_suspend,
	.resume = psys_resume,
};

#define PSYS_PM_OPS (&psys_pm_ops)
#else
#define PSYS_PM_OPS NULL
#endif

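/*
 * Replace the running CPD firmware: free the old package directory and
 * firmware mapping, request and validate the new CPD file, rebuild the
 * package directory and re-authenticate it (secure mode only).
 */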
static int cpd_fw_reload(struct ipu_device *isp)
{
	struct ipu_psys *psys = ipu_bus_get_drvdata(isp->psys);
	int rval;

	if (!isp->secure_mode) {
		dev_warn(&isp->pdev->dev,
			 "CPD firmware reload is only supported in secure mode.\n");
		return -EINVAL;
	}

	if (isp->cpd_fw) {
		ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir,
				     psys->pkg_dir_dma_addr,
				     psys->pkg_dir_size);

		ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt);
		release_firmware(isp->cpd_fw);
		isp->cpd_fw = NULL;
		dev_info(&isp->pdev->dev, "Old FW removed\n");
	}

	rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name,
			      &isp->pdev->dev);
	if (rval) {
		dev_err(&isp->pdev->dev, "Requesting firmware(%s) failed\n",
			isp->cpd_fw_name);
		return rval;
	}

	rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
					 isp->cpd_fw->size);
	if (rval) {
		dev_err(&isp->pdev->dev, "Failed to validate cpd file\n");
		goto out_release_firmware;
	}

	rval = ipu_buttress_map_fw_image(isp->psys, isp->cpd_fw, &psys->fw_sgt);
	if (rval)
		goto out_release_firmware;

	psys->pkg_dir = ipu_cpd_create_pkg_dir(isp->psys,
					       isp->cpd_fw->data,
					       sg_dma_address(psys->fw_sgt.sgl),
					       &psys->pkg_dir_dma_addr,
					       &psys->pkg_dir_size);
	if (!psys->pkg_dir) {
		rval = -EINVAL;
		goto out_unmap_fw_image;
	}

	isp->pkg_dir = psys->pkg_dir;
	isp->pkg_dir_dma_addr = psys->pkg_dir_dma_addr;
	isp->pkg_dir_size = psys->pkg_dir_size;

	if (!isp->secure_mode)
		return 0;

	rval = ipu_fw_authenticate(isp, 1);
	if (rval)
		goto out_free_pkg_dir;

	return 0;

out_free_pkg_dir:
	ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir,
			     psys->pkg_dir_dma_addr, psys->pkg_dir_size);
out_unmap_fw_image:
	ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt);
out_release_firmware:
	release_firmware(isp->cpd_fw);
	isp->cpd_fw = NULL;

	return rval;
}

#ifdef CONFIG_DEBUG_FS
static int ipu_psys_icache_prefetch_sp_get(void *data, u64 *val)
{
	struct ipu_psys *psys = data;

	*val = psys->icache_prefetch_sp;
	return 0;
}

static int ipu_psys_icache_prefetch_sp_set(void *data, u64 val)
{
	struct ipu_psys *psys = data;

	if (val != !!val)
		return -EINVAL;

	psys->icache_prefetch_sp = val;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_sp_fops,
			ipu_psys_icache_prefetch_sp_get,
			ipu_psys_icache_prefetch_sp_set, "%llu\n");

static int ipu_psys_icache_prefetch_isp_get(void *data, u64 *val)
{
	struct ipu_psys *psys = data;

	*val = psys->icache_prefetch_isp;
	return 0;
}

static int ipu_psys_icache_prefetch_isp_set(void *data, u64 val)
{
	struct ipu_psys *psys = data;

	if (val != !!val)
		return -EINVAL;

	psys->icache_prefetch_isp = val;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_isp_fops,
			ipu_psys_icache_prefetch_isp_get,
			ipu_psys_icache_prefetch_isp_set, "%llu\n");

static int ipu_psys_init_debugfs(struct ipu_psys *psys)
{
	struct dentry *file;
	struct dentry *dir;

	dir = debugfs_create_dir("psys", psys->adev->isp->ipu_dir);
	if (IS_ERR(dir))
		return -ENOMEM;

	file = debugfs_create_file("icache_prefetch_sp", 0600,
				   dir, psys, &psys_icache_prefetch_sp_fops);
	if (IS_ERR(file))
		goto err;

	file = debugfs_create_file("icache_prefetch_isp", 0600,
				   dir, psys, &psys_icache_prefetch_isp_fops);
	if (IS_ERR(file))
		goto err;

	psys->debugfsdir = dir;

#ifdef IPU_PSYS_GPC
	if (ipu_psys_gpc_init_debugfs(psys))
		return -ENOMEM;
#endif

	return 0;
err:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}
#endif

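/*
 * Command scheduler thread: sleeps until woken via wakeup_count and
 * then lets ipu_psys_run_next() dispatch queued commands to firmware.
 */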
static int ipu_psys_sched_cmd(void *ptr)
{
	struct ipu_psys *psys = ptr;
	size_t pending = 0;

	while (1) {
		wait_event_interruptible(psys->sched_cmd_wq,
					 (kthread_should_stop() ||
					  (pending =
					   atomic_read(&psys->wakeup_count))));

		if (kthread_should_stop())
			break;

		if (pending == 0)
			continue;

		mutex_lock(&psys->mutex);
		atomic_set(&psys->wakeup_count, 0);
		ipu_psys_run_next(psys);
		mutex_unlock(&psys->mutex);
	}

	return 0;
}

static void start_sp(struct ipu_bus_device *adev)
{
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	void __iomem *spc_regs_base = psys->pdata->base +
	    psys->pdata->ipdata->hw_variant.spc_offset;
	u32 val = 0;

	val |= IPU_PSYS_SPC_STATUS_START |
	    IPU_PSYS_SPC_STATUS_RUN |
	    IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
	val |= psys->icache_prefetch_sp ?
	    IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0;
	writel(val, spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
}

static int query_sp(struct ipu_bus_device *adev)
{
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	void __iomem *spc_regs_base = psys->pdata->base +
	    psys->pdata->ipdata->hw_variant.spc_offset;
	u32 val = readl(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);

	/* return true when READY == 1, START == 0 */
	val &= IPU_PSYS_SPC_STATUS_READY | IPU_PSYS_SPC_STATUS_START;

	return val == IPU_PSYS_SPC_STATUS_READY;
}

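/*
 * Set up the firmware communication layer: one event (output) queue and
 * a per-IPU-version number of command (input) queues, plus the server
 * init blob handed to the psys server at boot.
 */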
static int ipu_psys_fw_init(struct ipu_psys *psys)
{
	unsigned int size;
	struct ipu_fw_syscom_queue_config *queue_cfg;
	struct ipu_fw_syscom_queue_config fw_psys_event_queue_cfg[] = {
		{
			IPU_FW_PSYS_EVENT_QUEUE_SIZE,
			sizeof(struct ipu_fw_psys_event)
		}
	};
	struct ipu_fw_psys_srv_init server_init = {
		.ddr_pkg_dir_address = 0,
		.host_ddr_pkg_dir = NULL,
		.pkg_dir_size = 0,
		.icache_prefetch_sp = psys->icache_prefetch_sp,
		.icache_prefetch_isp = psys->icache_prefetch_isp,
	};
	struct ipu_fw_com_cfg fwcom = {
		.num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID,
		.output = fw_psys_event_queue_cfg,
		.specific_addr = &server_init,
		.specific_size = sizeof(server_init),
		.cell_start = start_sp,
		.cell_ready = query_sp,
		.buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET,
	};
	int i;

	size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
	if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP)
		size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;

	queue_cfg = devm_kzalloc(&psys->adev->dev, sizeof(*queue_cfg) * size,
				 GFP_KERNEL);
	if (!queue_cfg)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		queue_cfg[i].queue_size = IPU_FW_PSYS_CMD_QUEUE_SIZE;
		queue_cfg[i].token_size = sizeof(struct ipu_fw_psys_cmd);
	}

	fwcom.input = queue_cfg;
	fwcom.num_input_queues = size;
	fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset;

	psys->fwcom = ipu_fw_com_prepare(&fwcom, psys->adev, psys->pdata->base);
	if (!psys->fwcom) {
		dev_err(&psys->adev->dev, "psys fw com prepare failed\n");
		return -EIO;
	}

	return 0;
}

static void run_fw_init_work(struct work_struct *work)
{
	struct fw_init_task *task = (struct fw_init_task *)work;
	struct ipu_psys *psys = task->psys;
	int rval;

	rval = ipu_psys_fw_init(psys);
	if (rval) {
		dev_err(&psys->adev->dev, "FW init failed(%d)\n", rval);
		ipu_psys_remove(psys->adev);
	} else {
		dev_info(&psys->adev->dev, "FW init done\n");
	}
}

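/*
 * Bus probe: set up the character device, scheduler thread, resource
 * pools and process-group buffer pool, then initialize firmware either
 * synchronously or via a delayed work item (async_fw_init).
 */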
static int ipu_psys_probe(struct ipu_bus_device *adev)
{
	struct ipu_device *isp = adev->isp;
	struct ipu_psys_pg *kpg, *kpg0;
	struct ipu_psys *psys;
	unsigned int minor;
	int i, rval = -E2BIG;

	rval = ipu_mmu_hw_init(adev->mmu);
	if (rval)
		return rval;

	mutex_lock(&ipu_psys_mutex);

	minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0);
	if (minor == IPU_PSYS_NUM_DEVICES) {
		dev_err(&adev->dev, "too many devices\n");
		goto out_unlock;
	}

	psys = devm_kzalloc(&adev->dev, sizeof(*psys), GFP_KERNEL);
	if (!psys) {
		rval = -ENOMEM;
		goto out_unlock;
	}

	psys->adev = adev;
	psys->pdata = adev->pdata;
	psys->icache_prefetch_sp = 0;

	psys->power_gating = 0;

	ipu_trace_init(adev->isp, psys->pdata->base, &adev->dev,
		       psys_trace_blocks);

	cdev_init(&psys->cdev, &ipu_psys_fops);
	psys->cdev.owner = ipu_psys_fops.owner;

	rval = cdev_add(&psys->cdev, MKDEV(MAJOR(ipu_psys_dev_t), minor), 1);
	if (rval) {
		dev_err(&adev->dev, "cdev_add failed (%d)\n", rval);
		goto out_unlock;
	}

	set_bit(minor, ipu_psys_devices);

	spin_lock_init(&psys->ready_lock);
	spin_lock_init(&psys->pgs_lock);
	psys->ready = 0;
	psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS;

	mutex_init(&psys->mutex);
	INIT_LIST_HEAD(&psys->fhs);
	INIT_LIST_HEAD(&psys->pgs);
	INIT_LIST_HEAD(&psys->started_kcmds_list);
	INIT_WORK(&psys->watchdog_work, ipu_psys_watchdog_work);

	init_waitqueue_head(&psys->sched_cmd_wq);
	atomic_set(&psys->wakeup_count, 0);
	/*
	 * Create a thread to schedule commands sent to IPU firmware.
	 * The thread decouples the command scheduler from user-space
	 * command queueing.
	 */
	psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys,
					     "psys_sched_cmd");
	if (IS_ERR(psys->sched_cmd_thread)) {
		psys->sched_cmd_thread = NULL;
		mutex_destroy(&psys->mutex);
		goto out_unlock;
	}

	ipu_bus_set_drvdata(adev, psys);

	rval = ipu_psys_resource_pool_init(&psys->resource_pool_started);
	if (rval < 0) {
		dev_err(&psys->dev,
			"unable to alloc process group resources\n");
		goto out_mutex_destroy;
	}

	rval = ipu_psys_resource_pool_init(&psys->resource_pool_running);
	if (rval < 0) {
		dev_err(&psys->dev,
			"unable to alloc process group resources\n");
		goto out_resources_started_free;
	}

	ipu6_psys_hw_res_variant_init();
	psys->pkg_dir = isp->pkg_dir;
	psys->pkg_dir_dma_addr = isp->pkg_dir_dma_addr;
	psys->pkg_dir_size = isp->pkg_dir_size;
	psys->fw_sgt = isp->fw_sgt;

	/* allocate and map memory for process groups */
	for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) {
		kpg = kzalloc(sizeof(*kpg), GFP_KERNEL);
		if (!kpg)
			goto out_free_pgs;
		kpg->pg = dma_alloc_attrs(&adev->dev,
					  IPU_PSYS_PG_MAX_SIZE,
					  &kpg->pg_dma_addr,
					  GFP_KERNEL, 0);
		if (!kpg->pg) {
			kfree(kpg);
			goto out_free_pgs;
		}
		kpg->size = IPU_PSYS_PG_MAX_SIZE;
		list_add(&kpg->list, &psys->pgs);
	}

	psys->caps.pg_count = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir);

	dev_info(&adev->dev, "pkg_dir entry count:%d\n", psys->caps.pg_count);
	if (async_fw_init) {
		INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task,
				  run_fw_init_work);
		fw_init_task.psys = psys;
		schedule_delayed_work((struct delayed_work *)&fw_init_task, 0);
	} else {
		rval = ipu_psys_fw_init(psys);
	}

	if (rval) {
		dev_err(&adev->dev, "FW init failed(%d)\n", rval);
		goto out_free_pgs;
	}

	psys->dev.parent = &adev->dev;
	psys->dev.bus = &ipu_psys_bus;
	psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor);
	psys->dev.release = ipu_psys_dev_release;
	dev_set_name(&psys->dev, "ipu-psys%d", minor);
	rval = device_register(&psys->dev);
	if (rval < 0) {
		dev_err(&psys->dev, "psys device_register failed\n");
		goto out_release_fw_com;
	}

	/* Add the hw stepping information to caps */
	strlcpy(psys->caps.dev_model, IPU_MEDIA_DEV_MODEL_NAME,
		sizeof(psys->caps.dev_model));

	pm_runtime_set_autosuspend_delay(&psys->adev->dev,
					 IPU_PSYS_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&psys->adev->dev);
	pm_runtime_mark_last_busy(&psys->adev->dev);

	mutex_unlock(&ipu_psys_mutex);

#ifdef CONFIG_DEBUG_FS
	/* debugfs failure is not fatal */
	ipu_psys_init_debugfs(psys);
#endif

	adev->isp->cpd_fw_reload = &cpd_fw_reload;

	dev_info(&adev->dev, "psys probe minor: %d\n", minor);

	ipu_mmu_hw_cleanup(adev->mmu);

	return 0;

out_release_fw_com:
	ipu_fw_com_release(psys->fwcom, 1);
out_free_pgs:
	list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) {
		dma_free_attrs(&adev->dev, kpg->size, kpg->pg,
			       kpg->pg_dma_addr, 0);
		kfree(kpg);
	}

	ipu_psys_resource_pool_cleanup(&psys->resource_pool_running);
out_resources_started_free:
	ipu_psys_resource_pool_cleanup(&psys->resource_pool_started);
out_mutex_destroy:
	mutex_destroy(&psys->mutex);
	cdev_del(&psys->cdev);
	if (psys->sched_cmd_thread) {
		kthread_stop(psys->sched_cmd_thread);
		psys->sched_cmd_thread = NULL;
	}
out_unlock:
	/* Safe to call even if ipu_trace_init() was not called */
	ipu_trace_uninit(&adev->dev);
	mutex_unlock(&ipu_psys_mutex);

	ipu_mmu_hw_cleanup(adev->mmu);

	return rval;
}

static void ipu_psys_remove(struct ipu_bus_device *adev)
{
	struct ipu_device *isp = adev->isp;
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	struct ipu_psys_pg *kpg, *kpg0;

#ifdef CONFIG_DEBUG_FS
	if (isp->ipu_dir)
		debugfs_remove_recursive(psys->debugfsdir);
#endif

	flush_workqueue(IPU_PSYS_WORK_QUEUE);

	if (psys->sched_cmd_thread) {
		kthread_stop(psys->sched_cmd_thread);
		psys->sched_cmd_thread = NULL;
	}

	pm_runtime_dont_use_autosuspend(&psys->adev->dev);

	mutex_lock(&ipu_psys_mutex);

	list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) {
		dma_free_attrs(&adev->dev, kpg->size, kpg->pg,
			       kpg->pg_dma_addr, 0);
		kfree(kpg);
	}

	if (psys->fwcom && ipu_fw_com_release(psys->fwcom, 1))
		dev_err(&adev->dev, "fw com release failed.\n");

	kfree(psys->server_init);
	kfree(psys->syscom_config);

	ipu_trace_uninit(&adev->dev);

	ipu_psys_resource_pool_cleanup(&psys->resource_pool_started);
	ipu_psys_resource_pool_cleanup(&psys->resource_pool_running);

	device_unregister(&psys->dev);

	clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices);
	cdev_del(&psys->cdev);

	mutex_unlock(&ipu_psys_mutex);

	mutex_destroy(&psys->mutex);

	dev_info(&adev->dev, "removed\n");
}

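/*
 * Threaded ISR: only touches the hardware if the device is runtime
 * active; acknowledges GPDEV interrupts and handles firmware events.
 */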
static irqreturn_t psys_isr_threaded(struct ipu_bus_device *adev)
{
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	void __iomem *base = psys->pdata->base;
	u32 status;
	int r;

	mutex_lock(&psys->mutex);
#ifdef CONFIG_PM
	r = pm_runtime_get_if_in_use(&psys->adev->dev);
	if (!r || WARN_ON_ONCE(r < 0)) {
		mutex_unlock(&psys->mutex);
		return IRQ_NONE;
	}
#endif

	status = readl(base + IPU_REG_PSYS_GPDEV_IRQ_STATUS);
	writel(status, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR);

	if (status & IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0)) {
		writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0));
		ipu_psys_handle_events(psys);
	}

	pm_runtime_mark_last_busy(&psys->adev->dev);
	pm_runtime_put_autosuspend(&psys->adev->dev);
	mutex_unlock(&psys->mutex);

	return status ? IRQ_HANDLED : IRQ_NONE;
}

static struct ipu_bus_driver ipu_psys_driver = {
	.probe = ipu_psys_probe,
	.remove = ipu_psys_remove,
	.isr_threaded = psys_isr_threaded,
	.wanted = IPU_PSYS_NAME,
	.drv = {
		.name = IPU_PSYS_NAME,
		.owner = THIS_MODULE,
		.pm = PSYS_PM_OPS,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int __init ipu_psys_init(void)
{
	int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0,
				       IPU_PSYS_NUM_DEVICES, IPU_PSYS_NAME);
	if (rval) {
		pr_err("can't alloc psys chrdev region (%d)\n", rval);
		return rval;
	}

	rval = bus_register(&ipu_psys_bus);
	if (rval) {
		pr_warn("can't register psys bus (%d)\n", rval);
		goto out_bus_register;
	}

	ipu_bus_register_driver(&ipu_psys_driver);

	return rval;

out_bus_register:
	unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES);

	return rval;
}

static void __exit ipu_psys_exit(void)
{
	ipu_bus_unregister_driver(&ipu_psys_driver);
	bus_unregister(&ipu_psys_bus);
	unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES);
}

static const struct pci_device_id ipu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_PCI_ID)},
	{0,}
};
MODULE_DEVICE_TABLE(pci, ipu_pci_tbl);

module_init(ipu_psys_init);
module_exit(ipu_psys_exit);

MODULE_AUTHOR("Antti Laakso <antti.laakso@intel.com>");
MODULE_AUTHOR("Bin Han <bin.b.han@intel.com>");
MODULE_AUTHOR("Renwei Wu <renwei.wu@intel.com>");
MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Xia Wu <xia.wu@intel.com>");
MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang@intel.com>");
MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel IPU processing system driver");