]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/media/pci/intel/ipu-psys.c
37964b2965d904ea00506e28e261554a6690f457
[mirror_ubuntu-jammy-kernel.git] / drivers / media / pci / intel / ipu-psys.c
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2013 - 2020 Intel Corporation
3
4 #include <linux/debugfs.h>
5 #include <linux/delay.h>
6 #include <linux/device.h>
7 #include <linux/dma-buf.h>
8 #include <linux/firmware.h>
9 #include <linux/fs.h>
10 #include <linux/highmem.h>
11 #include <linux/init_task.h>
12 #include <linux/kthread.h>
13 #include <linux/mm.h>
14 #include <linux/module.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/version.h>
17 #include <linux/poll.h>
18 #include <uapi/linux/sched/types.h>
19 #include <linux/uaccess.h>
20 #include <linux/vmalloc.h>
21 #include <linux/dma-mapping.h>
22
23 #include <uapi/linux/ipu-psys.h>
24
25 #include "ipu.h"
26 #include "ipu-mmu.h"
27 #include "ipu-bus.h"
28 #include "ipu-platform.h"
29 #include "ipu-buttress.h"
30 #include "ipu-cpd.h"
31 #include "ipu-fw-psys.h"
32 #include "ipu-psys.h"
33 #include "ipu-platform-psys.h"
34 #include "ipu-platform-regs.h"
35 #include "ipu-fw-com.h"
36
37 static bool async_fw_init;
38 module_param(async_fw_init, bool, 0664);
39 MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization");
40
41 #define IPU_PSYS_NUM_DEVICES 4
42 #define IPU_PSYS_AUTOSUSPEND_DELAY 2000
43
44 #ifdef CONFIG_PM
45 static int psys_runtime_pm_resume(struct device *dev);
46 static int psys_runtime_pm_suspend(struct device *dev);
47 #else
48 #define pm_runtime_dont_use_autosuspend(d)
49 #define pm_runtime_use_autosuspend(d)
50 #define pm_runtime_set_autosuspend_delay(d, f) 0
51 #define pm_runtime_get_sync(d) 0
52 #define pm_runtime_put(d) 0
53 #define pm_runtime_put_sync(d) 0
54 #define pm_runtime_put_noidle(d) 0
55 #define pm_runtime_put_autosuspend(d) 0
56 #endif
57
58 static dev_t ipu_psys_dev_t;
59 static DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES);
60 static DEFINE_MUTEX(ipu_psys_mutex);
61
62 static struct fw_init_task {
63 struct delayed_work work;
64 struct ipu_psys *psys;
65 } fw_init_task;
66
67 static void ipu_psys_remove(struct ipu_bus_device *adev);
68
69 static struct bus_type ipu_psys_bus = {
70 .name = IPU_PSYS_NAME,
71 };
72
/*
 * __get_pg_buf - get a DMA-capable program-group buffer of at least @pg_size
 * @psys: psys device instance
 * @pg_size: required buffer size in bytes
 *
 * First tries to reuse a free entry (pg_size == 0) from psys->pgs that is
 * large enough; otherwise allocates a fresh DMA buffer and links it into
 * the list. Returns the buffer with pg_size set (marking it in use), or
 * NULL on allocation failure.
 *
 * psys->pgs_lock protects the list; the allocations are done outside the
 * spinlock because kzalloc/dma_alloc_attrs with GFP_KERNEL may sleep.
 */
struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size)
{
	struct ipu_psys_pg *kpg;
	unsigned long flags;

	spin_lock_irqsave(&psys->pgs_lock, flags);
	list_for_each_entry(kpg, &psys->pgs, list) {
		/* pg_size == 0 marks a cached entry as free for reuse */
		if (!kpg->pg_size && kpg->size >= pg_size) {
			kpg->pg_size = pg_size;
			spin_unlock_irqrestore(&psys->pgs_lock, flags);
			return kpg;
		}
	}
	spin_unlock_irqrestore(&psys->pgs_lock, flags);
	/* no big enough buffer available, allocate new one */
	kpg = kzalloc(sizeof(*kpg), GFP_KERNEL);
	if (!kpg)
		return NULL;

	kpg->pg = dma_alloc_attrs(&psys->adev->dev, pg_size,
				  &kpg->pg_dma_addr, GFP_KERNEL, 0);
	if (!kpg->pg) {
		kfree(kpg);
		return NULL;
	}

	kpg->pg_size = pg_size;
	kpg->size = pg_size;
	spin_lock_irqsave(&psys->pgs_lock, flags);
	list_add(&kpg->list, &psys->pgs);
	spin_unlock_irqrestore(&psys->pgs_lock, flags);

	return kpg;
}
107
108 static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh,
109 struct ipu_psys_kbuffer *kbuf);
110 struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd)
111 {
112 struct ipu_psys_kbuffer *kbuf;
113
114 list_for_each_entry(kbuf, &fh->bufmap, list) {
115 if (kbuf->fd == fd)
116 return kbuf;
117 }
118
119 return NULL;
120 }
121
122 struct ipu_psys_kbuffer *
123 ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr)
124 {
125 struct ipu_psys_kbuffer *kbuffer;
126
127 list_for_each_entry(kbuffer, &fh->bufmap, list) {
128 if (kbuffer->kaddr == kaddr)
129 return kbuffer;
130 }
131
132 return NULL;
133 }
134
135 static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach)
136 {
137 struct vm_area_struct *vma;
138 unsigned long start, end;
139 int npages, array_size;
140 struct page **pages;
141 struct sg_table *sgt;
142 int nr = 0;
143 int ret = -ENOMEM;
144
145 start = (unsigned long)attach->userptr;
146 end = PAGE_ALIGN(start + attach->len);
147 npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT;
148 array_size = npages * sizeof(struct page *);
149
150 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
151 if (!sgt)
152 return -ENOMEM;
153
154 if (attach->npages != 0) {
155 pages = attach->pages;
156 npages = attach->npages;
157 attach->vma_is_io = 1;
158 goto skip_pages;
159 }
160
161 pages = kvzalloc(array_size, GFP_KERNEL);
162 if (!pages)
163 goto free_sgt;
164
165 mmap_read_lock(current->mm);
166 vma = find_vma(current->mm, start);
167 if (!vma) {
168 ret = -EFAULT;
169 goto error_up_read;
170 }
171
172 /*
173 * For buffers from Gralloc, VM_PFNMAP is expected,
174 * but VM_IO is set. Possibly bug in Gralloc.
175 */
176 attach->vma_is_io = vma->vm_flags & (VM_IO | VM_PFNMAP);
177
178 if (attach->vma_is_io) {
179 unsigned long io_start = start;
180
181 if (vma->vm_end < start + attach->len) {
182 dev_err(attach->dev,
183 "vma at %lu is too small for %llu bytes\n",
184 start, attach->len);
185 ret = -EFAULT;
186 goto error_up_read;
187 }
188
189 for (nr = 0; nr < npages; nr++, io_start += PAGE_SIZE) {
190 unsigned long pfn;
191
192 ret = follow_pfn(vma, io_start, &pfn);
193 if (ret)
194 goto error_up_read;
195 pages[nr] = pfn_to_page(pfn);
196 }
197 } else {
198 nr = get_user_pages(start & PAGE_MASK, npages,
199 FOLL_WRITE,
200 pages, NULL);
201 if (nr < npages)
202 goto error_up_read;
203 }
204 mmap_read_unlock(current->mm);
205
206 attach->pages = pages;
207 attach->npages = npages;
208
209 skip_pages:
210 ret = sg_alloc_table_from_pages(sgt, pages, npages,
211 start & ~PAGE_MASK, attach->len,
212 GFP_KERNEL);
213 if (ret < 0)
214 goto error;
215
216 attach->sgt = sgt;
217
218 return 0;
219
220 error_up_read:
221 mmap_read_unlock(current->mm);
222 error:
223 if (!attach->vma_is_io)
224 while (nr > 0)
225 put_page(pages[--nr]);
226
227 if (array_size <= PAGE_SIZE)
228 kfree(pages);
229 else
230 vfree(pages);
231 free_sgt:
232 kfree(sgt);
233
234 dev_err(attach->dev, "failed to get userpages:%d\n", ret);
235
236 return ret;
237 }
238
/*
 * ipu_psys_put_userpages - undo ipu_psys_get_userpages()
 * @attach: attachment whose pinned pages and sg_table are released
 *
 * For pages pinned with get_user_pages() (non-IO VMAs) each page is marked
 * dirty and its reference dropped; pfn-mapped (IO) pages were never
 * refcounted, so only the bookkeeping is freed. A no-op unless a mapping
 * actually exists (attach->sgt set).
 *
 * NOTE(review): attach->pages is freed but not cleared here, while
 * attach->sgt is cleared -- verify no caller re-enters the skip_pages path
 * of ipu_psys_get_userpages() with the freed array still referenced.
 */
static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach)
{
	if (!attach || !attach->userptr || !attach->sgt)
		return;

	if (!attach->vma_is_io) {
		int i = attach->npages;

		while (--i >= 0) {
			set_page_dirty_lock(attach->pages[i]);
			put_page(attach->pages[i]);
		}
	}

	kvfree(attach->pages);

	sg_free_table(attach->sgt);
	kfree(attach->sgt);
	attach->sgt = NULL;
}
259
260 static int ipu_dma_buf_attach(struct dma_buf *dbuf,
261 struct dma_buf_attachment *attach)
262 {
263 struct ipu_psys_kbuffer *kbuf = dbuf->priv;
264 struct ipu_dma_buf_attach *ipu_attach;
265
266 ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL);
267 if (!ipu_attach)
268 return -ENOMEM;
269
270 ipu_attach->len = kbuf->len;
271 ipu_attach->userptr = kbuf->userptr;
272
273 attach->priv = ipu_attach;
274 return 0;
275 }
276
277 static void ipu_dma_buf_detach(struct dma_buf *dbuf,
278 struct dma_buf_attachment *attach)
279 {
280 struct ipu_dma_buf_attach *ipu_attach = attach->priv;
281
282 kfree(ipu_attach);
283 attach->priv = NULL;
284 }
285
/*
 * DMA-buf map_dma_buf callback: pin the user pages behind this attachment
 * and DMA-map them for attach->dev.
 *
 * NOTE(review): returns NULL (not an ERR_PTR) when pinning fails --
 * confirm callers of dma_buf_map_attachment() in this driver handle both
 * (ipu_psys_mapbuf_locked uses IS_ERR_OR_NULL, which does).
 */
static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct ipu_dma_buf_attach *ipu_attach = attach->priv;
	unsigned long attrs;
	int ret;

	ret = ipu_psys_get_userpages(ipu_attach);
	if (ret)
		return NULL;

	/* skip the implicit sync; an explicit one is done below */
	attrs = DMA_ATTR_SKIP_CPU_SYNC;
	ret = dma_map_sg_attrs(attach->dev, ipu_attach->sgt->sgl,
			       ipu_attach->sgt->orig_nents, dir, attrs);
	if (ret < ipu_attach->sgt->orig_nents) {
		ipu_psys_put_userpages(ipu_attach);
		dev_dbg(attach->dev, "buf map failed\n");

		return ERR_PTR(-EIO);
	}

	/*
	 * Initial cache flush to avoid writing dirty pages for buffers which
	 * are later marked as IPU_BUFFER_FLAG_NO_FLUSH.
	 */
	dma_sync_sg_for_device(attach->dev, ipu_attach->sgt->sgl,
			       ipu_attach->sgt->orig_nents, DMA_BIDIRECTIONAL);

	return ipu_attach->sgt;
}
316
/* DMA-buf unmap_dma_buf callback: DMA-unmap then unpin the user pages. */
static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach,
			      struct sg_table *sg, enum dma_data_direction dir)
{
	struct ipu_dma_buf_attach *ipu_attach = attach->priv;

	dma_unmap_sg(attach->dev, sg->sgl, sg->orig_nents, dir);
	ipu_psys_put_userpages(ipu_attach);
}
325
/* mmap of these dma-bufs is not supported. */
static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
{
	return -ENOTTY;
}
330
/*
 * DMA-buf release callback, invoked when the last reference to the
 * dma-buf is dropped. Owns the kbuf that ipu_psys_getbuf() stored as
 * exp_info.priv: unpins any remaining user pages and frees the kbuf.
 */
static void ipu_dma_buf_release(struct dma_buf *buf)
{
	struct ipu_psys_kbuffer *kbuf = buf->priv;

	if (!kbuf)
		return;

	if (kbuf->db_attach) {
		dev_dbg(kbuf->db_attach->dev,
			"releasing buffer %d\n", kbuf->fd);
		ipu_psys_put_userpages(kbuf->db_attach->priv);
	}
	kfree(kbuf);
}
345
/* CPU access through the dma-buf sync ioctl is not supported. */
static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					enum dma_data_direction dir)
{
	return -ENOTTY;
}
351
/*
 * DMA-buf vmap callback: map the pinned pages of the most recently added
 * attachment into a contiguous kernel virtual range.
 * Returns NULL when there is no attachment or no pinned pages yet.
 */
static void *ipu_dma_buf_vmap(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;
	struct ipu_dma_buf_attach *ipu_attach;

	if (list_empty(&dmabuf->attachments))
		return NULL;

	attach = list_last_entry(&dmabuf->attachments,
				 struct dma_buf_attachment, node);
	ipu_attach = attach->priv;

	if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)
		return NULL;

	return vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0);
}
369
/*
 * DMA-buf vunmap callback: release the kernel virtual mapping created by
 * ipu_dma_buf_vmap(). The page count is taken from the same (last)
 * attachment that vmap used.
 */
static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct dma_buf_attachment *attach;
	struct ipu_dma_buf_attach *ipu_attach;

	if (WARN_ON(list_empty(&dmabuf->attachments)))
		return;

	attach = list_last_entry(&dmabuf->attachments,
				 struct dma_buf_attachment, node);
	ipu_attach = attach->priv;

	if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages))
		return;

	vm_unmap_ram(vaddr, ipu_attach->npages);
}
387
/* Exporter ops for userptr-backed dma-bufs created via IPU_IOC_GETBUF. */
struct dma_buf_ops ipu_dma_buf_ops = {
	.attach = ipu_dma_buf_attach,
	.detach = ipu_dma_buf_detach,
	.map_dma_buf = ipu_dma_buf_map,
	.unmap_dma_buf = ipu_dma_buf_unmap,
	.release = ipu_dma_buf_release,
	.begin_cpu_access = ipu_dma_buf_begin_cpu_access,
	.mmap = ipu_dma_buf_mmap,
	.vmap = ipu_dma_buf_vmap,
	.vunmap = ipu_dma_buf_vunmap,
};
399
/*
 * Character-device open: allocate a per-open file handle (struct
 * ipu_psys_fh), initialize its buffer map, wait queue and mutex, run the
 * platform fh init, and register the handle on psys->fhs.
 */
static int ipu_psys_open(struct inode *inode, struct file *file)
{
	struct ipu_psys *psys = inode_to_ipu_psys(inode);
	struct ipu_device *isp = psys->adev->isp;
	struct ipu_psys_fh *fh;
	int rval;

	/* refuse new opens once a function-level reset has happened */
	if (isp->flr_done)
		return -EIO;

	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	if (!fh)
		return -ENOMEM;

	fh->psys = psys;

	file->private_data = fh;

	mutex_init(&fh->mutex);
	INIT_LIST_HEAD(&fh->bufmap);
	init_waitqueue_head(&fh->wait);

	rval = ipu_psys_fh_init(fh);
	if (rval)
		goto open_failed;

	mutex_lock(&psys->mutex);
	list_add_tail(&fh->list, &psys->fhs);
	mutex_unlock(&psys->mutex);

	return 0;

open_failed:
	mutex_destroy(&fh->mutex);
	kfree(fh);
	return rval;
}
437
/*
 * Tear down all mappings of @kbuf in reverse order of creation: kernel
 * vmap, DMA attachment mapping, the attachment itself, and finally the
 * dma-buf reference taken at map time. The buffer is marked invalid
 * first. Note: dma_buf_put() is called unconditionally, so kbuf->dbuf
 * must be a valid dma-buf pointer on entry.
 */
static inline void ipu_psys_kbuf_unmap(struct ipu_psys_kbuffer *kbuf)
{
	if (!kbuf)
		return;

	kbuf->valid = false;
	if (kbuf->kaddr)
		dma_buf_vunmap(kbuf->dbuf, kbuf->kaddr);
	if (kbuf->sgt)
		dma_buf_unmap_attachment(kbuf->db_attach,
					 kbuf->sgt,
					 DMA_BIDIRECTIONAL);
	if (kbuf->db_attach)
		dma_buf_detach(kbuf->dbuf, kbuf->db_attach);
	dma_buf_put(kbuf->dbuf);

	kbuf->db_attach = NULL;
	kbuf->dbuf = NULL;
	kbuf->sgt = NULL;
}
458
/*
 * Character-device release: unmap and free every buffer still tracked in
 * the handle's bufmap, unlink the handle from psys->fhs, run the platform
 * fh deinit, and clear power gating when the last handle goes away.
 */
static int ipu_psys_release(struct inode *inode, struct file *file)
{
	struct ipu_psys *psys = inode_to_ipu_psys(inode);
	struct ipu_psys_fh *fh = file->private_data;
	struct ipu_psys_kbuffer *kbuf, *kbuf0;
	struct dma_buf_attachment *db_attach;

	mutex_lock(&fh->mutex);
	/* clean up buffers */
	if (!list_empty(&fh->bufmap)) {
		list_for_each_entry_safe(kbuf, kbuf0, &fh->bufmap, list) {
			list_del(&kbuf->list);
			db_attach = kbuf->db_attach;

			/* Unmap and release buffers */
			if (kbuf->dbuf && db_attach) {

				ipu_psys_kbuf_unmap(kbuf);
			} else {
				/* never mapped: only pages may be pinned */
				if (db_attach)
					ipu_psys_put_userpages(db_attach->priv);
				kfree(kbuf);
			}
		}
	}
	mutex_unlock(&fh->mutex);

	mutex_lock(&psys->mutex);
	list_del(&fh->list);

	mutex_unlock(&psys->mutex);
	/* platform deinit may sleep/take its own locks; do it unlocked */
	ipu_psys_fh_deinit(fh);

	mutex_lock(&psys->mutex);
	if (list_empty(&psys->fhs))
		psys->power_gating = 0;
	mutex_unlock(&psys->mutex);
	mutex_destroy(&fh->mutex);
	kfree(fh);

	return 0;
}
501
502 static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh)
503 {
504 struct ipu_psys_kbuffer *kbuf;
505 struct ipu_psys *psys = fh->psys;
506
507 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
508 struct dma_buf *dbuf;
509 int ret;
510
511 if (!buf->base.userptr) {
512 dev_err(&psys->adev->dev, "Buffer allocation not supported\n");
513 return -EINVAL;
514 }
515
516 kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
517 if (!kbuf)
518 return -ENOMEM;
519
520 kbuf->len = buf->len;
521 kbuf->userptr = buf->base.userptr;
522 kbuf->flags = buf->flags;
523
524 exp_info.ops = &ipu_dma_buf_ops;
525 exp_info.size = kbuf->len;
526 exp_info.flags = O_RDWR;
527 exp_info.priv = kbuf;
528
529 dbuf = dma_buf_export(&exp_info);
530 if (IS_ERR(dbuf)) {
531 kfree(kbuf);
532 return PTR_ERR(dbuf);
533 }
534
535 ret = dma_buf_fd(dbuf, 0);
536 if (ret < 0) {
537 kfree(kbuf);
538 dma_buf_put(dbuf);
539 return ret;
540 }
541
542 kbuf->fd = ret;
543 buf->base.fd = ret;
544 kbuf->flags = buf->flags &= ~IPU_BUFFER_FLAG_USERPTR;
545 kbuf->flags = buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE;
546
547 mutex_lock(&fh->mutex);
548 list_add(&kbuf->list, &fh->bufmap);
549 mutex_unlock(&fh->mutex);
550
551 dev_dbg(&psys->adev->dev, "IOC_GETBUF: userptr %p size %llu to fd %d",
552 buf->base.userptr, buf->len, buf->base.fd);
553
554 return 0;
555 }
556
/*
 * IPU_IOC_PUTBUF: intentionally a no-op; kept so the ioctl remains
 * accepted by the dispatch table in ipu_psys_ioctl().
 */
static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh)
{
	return 0;
}
561
562 int ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh,
563 struct ipu_psys_kbuffer *kbuf)
564 {
565 struct ipu_psys *psys = fh->psys;
566 struct dma_buf *dbuf;
567 int ret;
568
569 dbuf = dma_buf_get(fd);
570 if (IS_ERR(dbuf))
571 return -EINVAL;
572
573 if (!kbuf) {
574 /* This fd isn't generated by ipu_psys_getbuf, it
575 * is a new fd. Create a new kbuf item for this fd, and
576 * add this kbuf to bufmap list.
577 */
578 kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
579 if (!kbuf) {
580 ret = -ENOMEM;
581 goto mapbuf_fail;
582 }
583
584 list_add(&kbuf->list, &fh->bufmap);
585 }
586
587 /* fd valid and found, need remap */
588 if (kbuf->dbuf && (kbuf->dbuf != dbuf || kbuf->len != dbuf->size)) {
589 dev_dbg(&psys->adev->dev,
590 "dmabuf fd %d with kbuf %p changed, need remap.\n",
591 fd, kbuf);
592 ret = ipu_psys_unmapbuf_locked(fd, fh, kbuf);
593 if (ret)
594 goto mapbuf_fail;
595
596 kbuf = ipu_psys_lookup_kbuffer(fh, fd);
597 /* changed external dmabuf */
598 if (!kbuf) {
599 kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
600 if (!kbuf) {
601 ret = -ENOMEM;
602 goto mapbuf_fail;
603 }
604 list_add(&kbuf->list, &fh->bufmap);
605 }
606 }
607
608 if (kbuf->sgt) {
609 dev_dbg(&psys->adev->dev, "fd %d has been mapped!\n", fd);
610 dma_buf_put(dbuf);
611 goto mapbuf_end;
612 }
613
614 kbuf->dbuf = dbuf;
615
616 if (kbuf->len == 0)
617 kbuf->len = kbuf->dbuf->size;
618
619 kbuf->fd = fd;
620
621 kbuf->db_attach = dma_buf_attach(kbuf->dbuf, &psys->adev->dev);
622 if (IS_ERR(kbuf->db_attach)) {
623 ret = PTR_ERR(kbuf->db_attach);
624 dev_dbg(&psys->adev->dev, "dma buf attach failed\n");
625 goto kbuf_map_fail;
626 }
627
628 kbuf->sgt = dma_buf_map_attachment(kbuf->db_attach, DMA_BIDIRECTIONAL);
629 if (IS_ERR_OR_NULL(kbuf->sgt)) {
630 ret = -EINVAL;
631 kbuf->sgt = NULL;
632 dev_dbg(&psys->adev->dev, "dma buf map attachment failed\n");
633 goto kbuf_map_fail;
634 }
635
636 kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl);
637
638 kbuf->kaddr = dma_buf_vmap(kbuf->dbuf);
639 if (!kbuf->kaddr) {
640 ret = -EINVAL;
641 dev_dbg(&psys->adev->dev, "dma buf vmap failed\n");
642 goto kbuf_map_fail;
643 }
644
645 dev_dbg(&psys->adev->dev, "%s kbuf %p fd %d with len %llu mapped\n",
646 __func__, kbuf, fd, kbuf->len);
647 mapbuf_end:
648
649 kbuf->valid = true;
650
651 return 0;
652
653 kbuf_map_fail:
654 ipu_psys_kbuf_unmap(kbuf);
655
656 list_del(&kbuf->list);
657 if (!kbuf->userptr)
658 kfree(kbuf);
659 return ret;
660
661 mapbuf_fail:
662 dma_buf_put(dbuf);
663
664 dev_err(&psys->adev->dev, "%s failed for fd %d\n", __func__, fd);
665 return ret;
666 }
667
668 static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh)
669 {
670 long ret;
671 struct ipu_psys_kbuffer *kbuf;
672
673 mutex_lock(&fh->mutex);
674 kbuf = ipu_psys_lookup_kbuffer(fh, fd);
675 ret = ipu_psys_mapbuf_locked(fd, fh, kbuf);
676 mutex_unlock(&fh->mutex);
677
678 dev_dbg(&fh->psys->adev->dev, "IOC_MAPBUF ret %ld\n", ret);
679
680 return ret;
681 }
682
/*
 * ipu_psys_unmapbuf_locked - unmap and untrack a buffer (fh->mutex held)
 * @fd: dma-buf fd the buffer was mapped under
 * @fh: owning file handle
 * @kbuf: the bufmap entry for @fd (must match; validated below)
 *
 * Tears down all mappings, removes the entry from the bufmap and frees it
 * unless it is userptr-backed (those are freed by the dma-buf release
 * callback). Returns 0 on success, -EINVAL on a stale/mismatched kbuf.
 */
static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh,
				    struct ipu_psys_kbuffer *kbuf)
{
	struct ipu_psys *psys = fh->psys;

	if (!kbuf || fd != kbuf->fd) {
		dev_err(&psys->adev->dev, "invalid kbuffer\n");
		return -EINVAL;
	}

	/* From now on it is not safe to use this kbuffer */
	ipu_psys_kbuf_unmap(kbuf);

	list_del(&kbuf->list);

	if (!kbuf->userptr)
		kfree(kbuf);

	dev_dbg(&psys->adev->dev, "%s fd %d unmapped\n", __func__, fd);

	return 0;
}
705
706 static long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh)
707 {
708 struct ipu_psys_kbuffer *kbuf;
709 long ret;
710
711 mutex_lock(&fh->mutex);
712 kbuf = ipu_psys_lookup_kbuffer(fh, fd);
713 if (!kbuf) {
714 dev_err(&fh->psys->adev->dev,
715 "buffer with fd %d not found\n", fd);
716 mutex_unlock(&fh->mutex);
717 return -EINVAL;
718 }
719 ret = ipu_psys_unmapbuf_locked(fd, fh, kbuf);
720 mutex_unlock(&fh->mutex);
721
722 dev_dbg(&fh->psys->adev->dev, "IOC_UNMAPBUF\n");
723
724 return ret;
725 }
726
/*
 * poll/select support: readable (POLLIN) when a completed kernel command
 * is available for dequeue via IPU_IOC_DQEVENT.
 */
static unsigned int ipu_psys_poll(struct file *file,
				  struct poll_table_struct *wait)
{
	struct ipu_psys_fh *fh = file->private_data;
	struct ipu_psys *psys = fh->psys;
	unsigned int res = 0;

	dev_dbg(&psys->adev->dev, "ipu psys poll\n");

	poll_wait(file, &fh->wait, wait);

	if (ipu_get_completed_kcmd(fh))
		res = POLLIN;

	dev_dbg(&psys->adev->dev, "ipu psys poll res %u\n", res);

	return res;
}
745
746 static long ipu_get_manifest(struct ipu_psys_manifest *manifest,
747 struct ipu_psys_fh *fh)
748 {
749 struct ipu_psys *psys = fh->psys;
750 struct ipu_device *isp = psys->adev->isp;
751 struct ipu_cpd_client_pkg_hdr *client_pkg;
752 u32 entries;
753 void *host_fw_data;
754 dma_addr_t dma_fw_data;
755 u32 client_pkg_offset;
756
757 host_fw_data = (void *)isp->cpd_fw->data;
758 dma_fw_data = sg_dma_address(psys->fw_sgt.sgl);
759
760 entries = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir);
761 if (!manifest || manifest->index > entries - 1) {
762 dev_err(&psys->adev->dev, "invalid argument\n");
763 return -EINVAL;
764 }
765
766 if (!ipu_cpd_pkg_dir_get_size(psys->pkg_dir, manifest->index) ||
767 ipu_cpd_pkg_dir_get_type(psys->pkg_dir, manifest->index) <
768 IPU_CPD_PKG_DIR_CLIENT_PG_TYPE) {
769 dev_dbg(&psys->adev->dev, "invalid pkg dir entry\n");
770 return -ENOENT;
771 }
772
773 client_pkg_offset = ipu_cpd_pkg_dir_get_address(psys->pkg_dir,
774 manifest->index);
775 client_pkg_offset -= dma_fw_data;
776
777 client_pkg = host_fw_data + client_pkg_offset;
778 manifest->size = client_pkg->pg_manifest_size;
779
780 if (!manifest->manifest)
781 return 0;
782
783 if (copy_to_user(manifest->manifest,
784 (uint8_t *)client_pkg + client_pkg->pg_manifest_offs,
785 manifest->size)) {
786 return -EFAULT;
787 }
788
789 return 0;
790 }
791
/*
 * Main unlocked_ioctl handler. Most commands pass a struct that is copied
 * into/out of the karg union according to the _IOC_WRITE/_IOC_READ
 * direction bits encoded in the command; IPU_IOC_MAPBUF and
 * IPU_IOC_UNMAPBUF instead pass a raw dma-buf fd directly in @arg and
 * bypass the copy.
 */
static long ipu_psys_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	union {
		struct ipu_psys_buffer buf;
		struct ipu_psys_command cmd;
		struct ipu_psys_event ev;
		struct ipu_psys_capability caps;
		struct ipu_psys_manifest m;
	} karg;
	struct ipu_psys_fh *fh = file->private_data;
	long err = 0;
	void __user *up = (void __user *)arg;
	bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF);

	if (copy) {
		/* reject commands whose payload can't fit the union */
		if (_IOC_SIZE(cmd) > sizeof(karg))
			return -ENOTTY;

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			err = copy_from_user(&karg, up, _IOC_SIZE(cmd));
			if (err)
				return -EFAULT;
		}
	}

	switch (cmd) {
	case IPU_IOC_MAPBUF:
		err = ipu_psys_mapbuf(arg, fh);
		break;
	case IPU_IOC_UNMAPBUF:
		err = ipu_psys_unmapbuf(arg, fh);
		break;
	case IPU_IOC_QUERYCAP:
		karg.caps = fh->psys->caps;
		break;
	case IPU_IOC_GETBUF:
		err = ipu_psys_getbuf(&karg.buf, fh);
		break;
	case IPU_IOC_PUTBUF:
		err = ipu_psys_putbuf(&karg.buf, fh);
		break;
	case IPU_IOC_QCMD:
		err = ipu_psys_kcmd_new(&karg.cmd, fh);
		break;
	case IPU_IOC_DQEVENT:
		err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags);
		break;
	case IPU_IOC_GET_MANIFEST:
		err = ipu_get_manifest(&karg.m, fh);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	if (err)
		return err;

	/* copy results back for _IOC_READ commands on success only */
	if (copy && _IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(up, &karg, _IOC_SIZE(cmd)))
			return -EFAULT;

	return 0;
}
857
/* File operations for the /dev/ipu-psysN character device. */
static const struct file_operations ipu_psys_fops = {
	.open = ipu_psys_open,
	.release = ipu_psys_release,
	.unlocked_ioctl = ipu_psys_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ipu_psys_compat_ioctl32,
#endif
	.poll = ipu_psys_poll,
	.owner = THIS_MODULE,
};
868
/*
 * Intentionally empty release for the psys character device's struct
 * device; a callback must exist so the driver core does not warn when the
 * device is unregistered.
 */
static void ipu_psys_dev_release(struct device *dev)
{
}
872
873 #ifdef CONFIG_PM
/*
 * Runtime-PM resume: bring the psys power domains, MMU, SPC and firmware
 * communication back up. Several early-outs return 0 deliberately (async
 * FW init pending, buttress authentication not done) so that runtime
 * resume itself does not fail; the device simply stays not-ready.
 */
static int psys_runtime_pm_resume(struct device *dev)
{
	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	unsigned long flags;
	int retval;

	if (!psys)
		return 0;

	/*
	 * In runtime autosuspend mode, if the psys is in power on state, no
	 * need to resume again.
	 */
	spin_lock_irqsave(&psys->ready_lock, flags);
	if (psys->ready) {
		spin_unlock_irqrestore(&psys->ready_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&psys->ready_lock, flags);

	retval = ipu_mmu_hw_init(adev->mmu);
	if (retval)
		return retval;

	if (async_fw_init && !psys->fwcom) {
		dev_err(dev,
			"%s: asynchronous firmware init not finished, skipping\n",
			__func__);
		return 0;
	}

	if (!ipu_buttress_auth_done(adev->isp)) {
		dev_dbg(dev, "%s: not yet authenticated, skipping\n", __func__);
		return 0;
	}

	ipu_psys_setup_hw(psys);

	ipu_psys_subdomains_power(psys, 1);
	ipu_trace_restore(&psys->adev->dev);

	ipu_configure_spc(adev->isp,
			  &psys->pdata->ipdata->hw_variant,
			  IPU_CPD_PKG_DIR_PSYS_SERVER_IDX,
			  psys->pdata->base, psys->pkg_dir,
			  psys->pkg_dir_dma_addr);

	retval = ipu_fw_psys_open(psys);
	if (retval) {
		dev_err(&psys->adev->dev, "Failed to open abi.\n");
		return retval;
	}

	spin_lock_irqsave(&psys->ready_lock, flags);
	psys->ready = 1;
	spin_unlock_irqrestore(&psys->ready_lock, flags);

	return 0;
}
934
/*
 * Runtime-PM suspend: close the firmware interface, power down the psys
 * subdomains and clean up the MMU. Always returns 0 -- failures are only
 * logged, since the device is heading to a power-gated state anyway.
 *
 * NOTE(review): psys->ready is first tested without ready_lock before the
 * locked clear below -- confirm this lock-free fast path is intentional.
 */
static int psys_runtime_pm_suspend(struct device *dev)
{
	struct ipu_bus_device *adev = to_ipu_bus_device(dev);
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	unsigned long flags;
	int rval;

	if (!psys)
		return 0;

	if (!psys->ready)
		return 0;

	spin_lock_irqsave(&psys->ready_lock, flags);
	psys->ready = 0;
	spin_unlock_irqrestore(&psys->ready_lock, flags);

	/*
	 * We can trace failure but better to not return an error.
	 * At suspend we are progressing towards psys power gated state.
	 * Any hang / failure inside psys will be forgotten soon.
	 */
	rval = ipu_fw_psys_close(psys);
	if (rval)
		dev_err(dev, "Device close failure: %d\n", rval);

	ipu_psys_subdomains_power(psys, 0);

	ipu_mmu_hw_cleanup(adev->mmu);

	return 0;
}
967
968 /* The following PM callbacks are needed to enable runtime PM in IPU PCI
969 * device resume, otherwise, runtime PM can't work in PCI resume from
970 * S3 state.
971 */
/* System resume: no-op; see the comment above about enabling runtime PM. */
static int psys_resume(struct device *dev)
{
	return 0;
}
976
/* System suspend: no-op counterpart of psys_resume(). */
static int psys_suspend(struct device *dev)
{
	return 0;
}
981
/* PM callbacks: real work happens in the runtime_* pair. */
static const struct dev_pm_ops psys_pm_ops = {
	.runtime_suspend = psys_runtime_pm_suspend,
	.runtime_resume = psys_runtime_pm_resume,
	.suspend = psys_suspend,
	.resume = psys_resume,
};
988
989 #define PSYS_PM_OPS (&psys_pm_ops)
990 #else
991 #define PSYS_PM_OPS NULL
992 #endif
993
/*
 * cpd_fw_reload - drop the current CPD firmware and load/authenticate a
 * fresh copy (secure mode only)
 * @isp: IPU device
 *
 * Frees the existing package directory and firmware mapping (if any),
 * requests the firmware file again, validates it, maps it for DMA,
 * rebuilds the package directory and re-runs buttress authentication.
 * Returns 0 on success or a negative error code; on failure the firmware
 * state is fully unwound.
 */
static int cpd_fw_reload(struct ipu_device *isp)
{
	struct ipu_psys *psys = ipu_bus_get_drvdata(isp->psys);
	int rval;

	if (!isp->secure_mode) {
		dev_warn(&isp->pdev->dev,
			 "CPD firmware reload was only supported for secure mode.\n");
		return -EINVAL;
	}

	/* tear down the currently loaded firmware first */
	if (isp->cpd_fw) {
		ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir,
				     psys->pkg_dir_dma_addr,
				     psys->pkg_dir_size);

		ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt);
		release_firmware(isp->cpd_fw);
		isp->cpd_fw = NULL;
		dev_info(&isp->pdev->dev, "Old FW removed\n");
	}

	rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name,
			      &isp->pdev->dev);
	if (rval) {
		dev_err(&isp->pdev->dev, "Requesting firmware(%s) failed\n",
			isp->cpd_fw_name);
		return rval;
	}

	rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
					 isp->cpd_fw->size);
	if (rval) {
		dev_err(&isp->pdev->dev, "Failed to validate cpd file\n");
		goto out_release_firmware;
	}

	rval = ipu_buttress_map_fw_image(isp->psys, isp->cpd_fw, &psys->fw_sgt);
	if (rval)
		goto out_release_firmware;

	psys->pkg_dir = ipu_cpd_create_pkg_dir(isp->psys,
					       isp->cpd_fw->data,
					       sg_dma_address(psys->fw_sgt.sgl),
					       &psys->pkg_dir_dma_addr,
					       &psys->pkg_dir_size);

	if (!psys->pkg_dir) {
		rval = -EINVAL;
		goto out_unmap_fw_image;
	}

	/* mirror the package directory info at device level */
	isp->pkg_dir = psys->pkg_dir;
	isp->pkg_dir_dma_addr = psys->pkg_dir_dma_addr;
	isp->pkg_dir_size = psys->pkg_dir_size;

	if (!isp->secure_mode)
		return 0;

	rval = ipu_fw_authenticate(isp, 1);
	if (rval)
		goto out_free_pkg_dir;

	return 0;

out_free_pkg_dir:
	ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir,
			     psys->pkg_dir_dma_addr, psys->pkg_dir_size);
out_unmap_fw_image:
	ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt);
out_release_firmware:
	release_firmware(isp->cpd_fw);
	isp->cpd_fw = NULL;

	return rval;
}
1070
1071 #ifdef CONFIG_DEBUG_FS
/* debugfs: read the SP icache prefetch enable flag. */
static int ipu_psys_icache_prefetch_sp_get(void *data, u64 *val)
{
	struct ipu_psys *psys = data;

	*val = psys->icache_prefetch_sp;
	return 0;
}
1079
1080 static int ipu_psys_icache_prefetch_sp_set(void *data, u64 val)
1081 {
1082 struct ipu_psys *psys = data;
1083
1084 if (val != !!val)
1085 return -EINVAL;
1086
1087 psys->icache_prefetch_sp = val;
1088
1089 return 0;
1090 }
1091
1092 DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_sp_fops,
1093 ipu_psys_icache_prefetch_sp_get,
1094 ipu_psys_icache_prefetch_sp_set, "%llu\n");
1095
/* debugfs: read the ISP icache prefetch enable flag. */
static int ipu_psys_icache_prefetch_isp_get(void *data, u64 *val)
{
	struct ipu_psys *psys = data;

	*val = psys->icache_prefetch_isp;
	return 0;
}
1103
1104 static int ipu_psys_icache_prefetch_isp_set(void *data, u64 val)
1105 {
1106 struct ipu_psys *psys = data;
1107
1108 if (val != !!val)
1109 return -EINVAL;
1110
1111 psys->icache_prefetch_isp = val;
1112
1113 return 0;
1114 }
1115
1116 DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_isp_fops,
1117 ipu_psys_icache_prefetch_isp_get,
1118 ipu_psys_icache_prefetch_isp_set, "%llu\n");
1119
/*
 * Create the psys debugfs directory and its icache prefetch control
 * files. Returns 0 on success or -ENOMEM; on file-creation failure the
 * partially built directory is removed.
 *
 * NOTE(review): when ipu_psys_gpc_init_debugfs() fails, -ENOMEM is
 * returned without removing the directory created above -- verify whether
 * that is intentional.
 */
static int ipu_psys_init_debugfs(struct ipu_psys *psys)
{
	struct dentry *file;
	struct dentry *dir;

	dir = debugfs_create_dir("psys", psys->adev->isp->ipu_dir);
	if (IS_ERR(dir))
		return -ENOMEM;

	file = debugfs_create_file("icache_prefetch_sp", 0600,
				   dir, psys, &psys_icache_prefetch_sp_fops);
	if (IS_ERR(file))
		goto err;

	file = debugfs_create_file("icache_prefetch_isp", 0600,
				   dir, psys, &psys_icache_prefetch_isp_fops);
	if (IS_ERR(file))
		goto err;

	psys->debugfsdir = dir;

#ifdef IPU_PSYS_GPC
	if (ipu_psys_gpc_init_debugfs(psys))
		return -ENOMEM;
#endif

	return 0;
err:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}
1151 #endif
1152
/*
 * Scheduler kthread body: sleep until woken (psys->wakeup_count non-zero)
 * or asked to stop, then dispatch the next queued command under
 * psys->mutex. wakeup_count is cleared under the mutex, immediately
 * before ipu_psys_run_next().
 */
static int ipu_psys_sched_cmd(void *ptr)
{
	struct ipu_psys *psys = ptr;
	size_t pending = 0;

	while (1) {
		wait_event_interruptible(psys->sched_cmd_wq,
					 (kthread_should_stop() ||
					  (pending =
					   atomic_read(&psys->wakeup_count))));

		if (kthread_should_stop())
			break;

		/* spurious wakeup (e.g. signal): nothing to schedule */
		if (pending == 0)
			continue;

		mutex_lock(&psys->mutex);
		atomic_set(&psys->wakeup_count, 0);
		ipu_psys_run_next(psys);
		mutex_unlock(&psys->mutex);
	}

	return 0;
}
1178
/*
 * Kick the SPC: write START and RUN with an icache invalidate to the SPC
 * status/control register, adding icache prefetch when enabled.
 */
static void start_sp(struct ipu_bus_device *adev)
{
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	void __iomem *spc_regs_base = psys->pdata->base +
	    psys->pdata->ipdata->hw_variant.spc_offset;
	u32 val = 0;

	val |= IPU_PSYS_SPC_STATUS_START |
	    IPU_PSYS_SPC_STATUS_RUN |
	    IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
	val |= psys->icache_prefetch_sp ?
	    IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0;
	writel(val, spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
}
1193
1194 static int query_sp(struct ipu_bus_device *adev)
1195 {
1196 struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
1197 void __iomem *spc_regs_base = psys->pdata->base +
1198 psys->pdata->ipdata->hw_variant.spc_offset;
1199 u32 val = readl(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
1200
1201 /* return true when READY == 1, START == 0 */
1202 val &= IPU_PSYS_SPC_STATUS_READY | IPU_PSYS_SPC_STATUS_START;
1203
1204 return val == IPU_PSYS_SPC_STATUS_READY;
1205 }
1206
/*
 * ipu_psys_fw_init - prepare the firmware communication layer
 * @psys: psys device instance
 *
 * Builds the syscom input (command) and output (event) queue
 * configuration -- the number of command queues depends on the IPU
 * generation -- and hands it to ipu_fw_com_prepare() together with the
 * server init parameters. Returns 0 on success, -ENOMEM/-EIO on failure.
 */
static int ipu_psys_fw_init(struct ipu_psys *psys)
{
	unsigned int size;
	struct ipu_fw_syscom_queue_config *queue_cfg;
	struct ipu_fw_syscom_queue_config fw_psys_event_queue_cfg[] = {
		{
			IPU_FW_PSYS_EVENT_QUEUE_SIZE,
			sizeof(struct ipu_fw_psys_event)
		}
	};
	struct ipu_fw_psys_srv_init server_init = {
		.ddr_pkg_dir_address = 0,
		.host_ddr_pkg_dir = NULL,
		.pkg_dir_size = 0,
		.icache_prefetch_sp = psys->icache_prefetch_sp,
		.icache_prefetch_isp = psys->icache_prefetch_isp,
	};
	struct ipu_fw_com_cfg fwcom = {
		.num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID,
		.output = fw_psys_event_queue_cfg,
		.specific_addr = &server_init,
		.specific_size = sizeof(server_init),
		.cell_start = start_sp,
		.cell_ready = query_sp,
		.buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET,
	};
	int i;

	/* command queue count differs between IPU6SE and IPU6/IPU6EP */
	size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
	if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP)
		size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;

	queue_cfg = devm_kzalloc(&psys->adev->dev, sizeof(*queue_cfg) * size,
				 GFP_KERNEL);
	if (!queue_cfg)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		queue_cfg[i].queue_size = IPU_FW_PSYS_CMD_QUEUE_SIZE;
		queue_cfg[i].token_size = sizeof(struct ipu_fw_psys_cmd);
	}

	fwcom.input = queue_cfg;
	fwcom.num_input_queues = size;
	fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset;

	psys->fwcom = ipu_fw_com_prepare(&fwcom, psys->adev, psys->pdata->base);
	if (!psys->fwcom) {
		dev_err(&psys->adev->dev, "psys fw com prepare failed\n");
		return -EIO;
	}

	return 0;
}
1261
/*
 * Deferred firmware initialization, scheduled from ipu_psys_probe() when
 * the async_fw_init module parameter is set. Runs ipu_psys_fw_init() and
 * tears the device down via ipu_psys_remove() on failure.
 */
static void run_fw_init_work(struct work_struct *work)
{
	/*
	 * NOTE(review): this cast assumes the (delayed) work item is the
	 * first member of struct fw_init_task — confirm against the struct
	 * definition; container_of() would make that dependency explicit.
	 */
	struct fw_init_task *task = (struct fw_init_task *)work;
	struct ipu_psys *psys = task->psys;
	int rval;

	rval = ipu_psys_fw_init(psys);

	if (rval) {
		dev_err(&psys->adev->dev, "FW init failed(%d)\n", rval);
		/* FW is mandatory: undo the whole probe on failure */
		ipu_psys_remove(psys->adev);
	} else {
		dev_info(&psys->adev->dev, "FW init done\n");
	}
}
1277
1278 static int ipu_psys_probe(struct ipu_bus_device *adev)
1279 {
1280 struct ipu_device *isp = adev->isp;
1281 struct ipu_psys_pg *kpg, *kpg0;
1282 struct ipu_psys *psys;
1283 unsigned int minor;
1284 int i, rval = -E2BIG;
1285
1286 rval = ipu_mmu_hw_init(adev->mmu);
1287 if (rval)
1288 return rval;
1289
1290 mutex_lock(&ipu_psys_mutex);
1291
1292 minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0);
1293 if (minor == IPU_PSYS_NUM_DEVICES) {
1294 dev_err(&adev->dev, "too many devices\n");
1295 goto out_unlock;
1296 }
1297
1298 psys = devm_kzalloc(&adev->dev, sizeof(*psys), GFP_KERNEL);
1299 if (!psys) {
1300 rval = -ENOMEM;
1301 goto out_unlock;
1302 }
1303
1304 psys->adev = adev;
1305 psys->pdata = adev->pdata;
1306 psys->icache_prefetch_sp = 0;
1307
1308 psys->power_gating = 0;
1309
1310 ipu_trace_init(adev->isp, psys->pdata->base, &adev->dev,
1311 psys_trace_blocks);
1312
1313 cdev_init(&psys->cdev, &ipu_psys_fops);
1314 psys->cdev.owner = ipu_psys_fops.owner;
1315
1316 rval = cdev_add(&psys->cdev, MKDEV(MAJOR(ipu_psys_dev_t), minor), 1);
1317 if (rval) {
1318 dev_err(&adev->dev, "cdev_add failed (%d)\n", rval);
1319 goto out_unlock;
1320 }
1321
1322 set_bit(minor, ipu_psys_devices);
1323
1324 spin_lock_init(&psys->ready_lock);
1325 spin_lock_init(&psys->pgs_lock);
1326 psys->ready = 0;
1327 psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS;
1328
1329 mutex_init(&psys->mutex);
1330 INIT_LIST_HEAD(&psys->fhs);
1331 INIT_LIST_HEAD(&psys->pgs);
1332 INIT_LIST_HEAD(&psys->started_kcmds_list);
1333 INIT_WORK(&psys->watchdog_work, ipu_psys_watchdog_work);
1334
1335 init_waitqueue_head(&psys->sched_cmd_wq);
1336 atomic_set(&psys->wakeup_count, 0);
1337 /*
1338 * Create a thread to schedule commands sent to IPU firmware.
1339 * The thread reduces the coupling between the command scheduler
1340 * and queueing commands from the user to driver.
1341 */
1342 psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys,
1343 "psys_sched_cmd");
1344
1345 if (IS_ERR(psys->sched_cmd_thread)) {
1346 psys->sched_cmd_thread = NULL;
1347 mutex_destroy(&psys->mutex);
1348 goto out_unlock;
1349 }
1350
1351 ipu_bus_set_drvdata(adev, psys);
1352
1353 rval = ipu_psys_resource_pool_init(&psys->resource_pool_started);
1354 if (rval < 0) {
1355 dev_err(&psys->dev,
1356 "unable to alloc process group resources\n");
1357 goto out_mutex_destroy;
1358 }
1359
1360 rval = ipu_psys_resource_pool_init(&psys->resource_pool_running);
1361 if (rval < 0) {
1362 dev_err(&psys->dev,
1363 "unable to alloc process group resources\n");
1364 goto out_resources_started_free;
1365 }
1366
1367 ipu6_psys_hw_res_variant_init();
1368 psys->pkg_dir = isp->pkg_dir;
1369 psys->pkg_dir_dma_addr = isp->pkg_dir_dma_addr;
1370 psys->pkg_dir_size = isp->pkg_dir_size;
1371 psys->fw_sgt = isp->fw_sgt;
1372
1373 /* allocate and map memory for process groups */
1374 for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) {
1375 kpg = kzalloc(sizeof(*kpg), GFP_KERNEL);
1376 if (!kpg)
1377 goto out_free_pgs;
1378 kpg->pg = dma_alloc_attrs(&adev->dev,
1379 IPU_PSYS_PG_MAX_SIZE,
1380 &kpg->pg_dma_addr,
1381 GFP_KERNEL, 0);
1382 if (!kpg->pg) {
1383 kfree(kpg);
1384 goto out_free_pgs;
1385 }
1386 kpg->size = IPU_PSYS_PG_MAX_SIZE;
1387 list_add(&kpg->list, &psys->pgs);
1388 }
1389
1390 psys->caps.pg_count = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir);
1391
1392 dev_info(&adev->dev, "pkg_dir entry count:%d\n", psys->caps.pg_count);
1393 if (async_fw_init) {
1394 INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task,
1395 run_fw_init_work);
1396 fw_init_task.psys = psys;
1397 schedule_delayed_work((struct delayed_work *)&fw_init_task, 0);
1398 } else {
1399 rval = ipu_psys_fw_init(psys);
1400 }
1401
1402 if (rval) {
1403 dev_err(&adev->dev, "FW init failed(%d)\n", rval);
1404 goto out_free_pgs;
1405 }
1406
1407 psys->dev.parent = &adev->dev;
1408 psys->dev.bus = &ipu_psys_bus;
1409 psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor);
1410 psys->dev.release = ipu_psys_dev_release;
1411 dev_set_name(&psys->dev, "ipu-psys%d", minor);
1412 rval = device_register(&psys->dev);
1413 if (rval < 0) {
1414 dev_err(&psys->dev, "psys device_register failed\n");
1415 goto out_release_fw_com;
1416 }
1417
1418 /* Add the hw stepping information to caps */
1419 strlcpy(psys->caps.dev_model, IPU_MEDIA_DEV_MODEL_NAME,
1420 sizeof(psys->caps.dev_model));
1421
1422 pm_runtime_set_autosuspend_delay(&psys->adev->dev,
1423 IPU_PSYS_AUTOSUSPEND_DELAY);
1424 pm_runtime_use_autosuspend(&psys->adev->dev);
1425 pm_runtime_mark_last_busy(&psys->adev->dev);
1426
1427 mutex_unlock(&ipu_psys_mutex);
1428
1429 #ifdef CONFIG_DEBUG_FS
1430 /* Debug fs failure is not fatal. */
1431 ipu_psys_init_debugfs(psys);
1432 #endif
1433
1434 adev->isp->cpd_fw_reload = &cpd_fw_reload;
1435
1436 dev_info(&adev->dev, "psys probe minor: %d\n", minor);
1437
1438 ipu_mmu_hw_cleanup(adev->mmu);
1439
1440 return 0;
1441
1442 out_release_fw_com:
1443 ipu_fw_com_release(psys->fwcom, 1);
1444 out_free_pgs:
1445 list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) {
1446 dma_free_attrs(&adev->dev, kpg->size, kpg->pg,
1447 kpg->pg_dma_addr, 0);
1448 kfree(kpg);
1449 }
1450
1451 ipu_psys_resource_pool_cleanup(&psys->resource_pool_running);
1452 out_resources_started_free:
1453 ipu_psys_resource_pool_cleanup(&psys->resource_pool_started);
1454 out_mutex_destroy:
1455 mutex_destroy(&psys->mutex);
1456 cdev_del(&psys->cdev);
1457 if (psys->sched_cmd_thread) {
1458 kthread_stop(psys->sched_cmd_thread);
1459 psys->sched_cmd_thread = NULL;
1460 }
1461 out_unlock:
1462 /* Safe to call even if the init is not called */
1463 ipu_trace_uninit(&adev->dev);
1464 mutex_unlock(&ipu_psys_mutex);
1465
1466 ipu_mmu_hw_cleanup(adev->mmu);
1467
1468 return rval;
1469 }
1470
/*
 * ipu_psys_remove() - undo ipu_psys_probe() for one PSYS device.
 *
 * Also called from run_fw_init_work() when asynchronous firmware init
 * fails. Stops the scheduler thread and pending work first, then frees
 * the process group pool, releases the fw com context, resource pools,
 * and finally unregisters the device node and character device.
 */
static void ipu_psys_remove(struct ipu_bus_device *adev)
{
	struct ipu_device *isp = adev->isp;
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	struct ipu_psys_pg *kpg, *kpg0;

#ifdef CONFIG_DEBUG_FS
	/* Only remove our debugfs dir if the IPU root dir was created */
	if (isp->ipu_dir)
		debugfs_remove_recursive(psys->debugfsdir);
#endif

	/* Make sure no queued work touches the device after this point */
	flush_workqueue(IPU_PSYS_WORK_QUEUE);

	if (psys->sched_cmd_thread) {
		kthread_stop(psys->sched_cmd_thread);
		psys->sched_cmd_thread = NULL;
	}

	pm_runtime_dont_use_autosuspend(&psys->adev->dev);

	mutex_lock(&ipu_psys_mutex);

	/* Free the process group DMA pool allocated in probe */
	list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) {
		dma_free_attrs(&adev->dev, kpg->size, kpg->pg,
			       kpg->pg_dma_addr, 0);
		kfree(kpg);
	}

	/* fwcom may be NULL if async firmware init failed early */
	if (psys->fwcom && ipu_fw_com_release(psys->fwcom, 1))
		dev_err(&adev->dev, "fw com release failed.\n");

	kfree(psys->server_init);
	kfree(psys->syscom_config);

	ipu_trace_uninit(&adev->dev);

	ipu_psys_resource_pool_cleanup(&psys->resource_pool_started);
	ipu_psys_resource_pool_cleanup(&psys->resource_pool_running);

	device_unregister(&psys->dev);

	/* Give the minor number back for reuse by a later probe */
	clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices);
	cdev_del(&psys->cdev);

	mutex_unlock(&ipu_psys_mutex);

	mutex_destroy(&psys->mutex);

	dev_info(&adev->dev, "removed\n");
}
1521
/*
 * Threaded interrupt handler for the PSYS.
 *
 * Reads and clears the GPDEV IRQ status; when firmware IRQ 0 is set,
 * acknowledges it and dispatches pending firmware events. Returns
 * IRQ_HANDLED only if a non-zero status was observed.
 */
static irqreturn_t psys_isr_threaded(struct ipu_bus_device *adev)
{
	struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
	void __iomem *base = psys->pdata->base;
	u32 status;
	int r;

	mutex_lock(&psys->mutex);
#ifdef CONFIG_PM
	/*
	 * Only touch the hardware if it is currently powered; bail out
	 * without claiming the interrupt otherwise. r == 0 means the
	 * device was not in use; r < 0 is an unexpected PM error.
	 */
	r = pm_runtime_get_if_in_use(&psys->adev->dev);
	if (!r || WARN_ON_ONCE(r < 0)) {
		mutex_unlock(&psys->mutex);
		return IRQ_NONE;
	}
#endif

	/* Read-then-write clears exactly the bits we saw */
	status = readl(base + IPU_REG_PSYS_GPDEV_IRQ_STATUS);
	writel(status, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR);

	if (status & IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0)) {
		/* Ack the firmware interrupt, then drain the event queue */
		writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0));
		ipu_psys_handle_events(psys);
	}

	pm_runtime_mark_last_busy(&psys->adev->dev);
	pm_runtime_put_autosuspend(&psys->adev->dev);
	mutex_unlock(&psys->mutex);

	return status ? IRQ_HANDLED : IRQ_NONE;
}
1552
/* PSYS driver registered on the IPU internal bus (see ipu_psys_init()). */
static struct ipu_bus_driver ipu_psys_driver = {
	.probe = ipu_psys_probe,
	.remove = ipu_psys_remove,
	.isr_threaded = psys_isr_threaded,
	.wanted = IPU_PSYS_NAME,
	.drv = {
		.name = IPU_PSYS_NAME,
		.owner = THIS_MODULE,
		.pm = PSYS_PM_OPS,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
1565
1566 static int __init ipu_psys_init(void)
1567 {
1568 int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0,
1569 IPU_PSYS_NUM_DEVICES, IPU_PSYS_NAME);
1570 if (rval) {
1571 pr_err("can't alloc psys chrdev region (%d)\n", rval);
1572 return rval;
1573 }
1574
1575 rval = bus_register(&ipu_psys_bus);
1576 if (rval) {
1577 pr_warn("can't register psys bus (%d)\n", rval);
1578 goto out_bus_register;
1579 }
1580
1581 ipu_bus_register_driver(&ipu_psys_driver);
1582
1583 return rval;
1584
1585 out_bus_register:
1586 unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES);
1587
1588 return rval;
1589 }
1590
/* Module exit: tear down in reverse order of ipu_psys_init(). */
static void __exit ipu_psys_exit(void)
{
	ipu_bus_unregister_driver(&ipu_psys_driver);
	bus_unregister(&ipu_psys_bus);
	unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES);
}
1597
/*
 * PCI IDs of the IPU6 variants this module supports. The table is only
 * exported via MODULE_DEVICE_TABLE() for module autoloading; no
 * pci_driver is registered in this file (the driver binds to the IPU
 * internal bus instead).
 */
static const struct pci_device_id ipu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_PCI_ID)},
	{0,}	/* terminator */
};
MODULE_DEVICE_TABLE(pci, ipu_pci_tbl);

module_init(ipu_psys_init);
module_exit(ipu_psys_exit);

MODULE_AUTHOR("Antti Laakso <antti.laakso@intel.com>");
MODULE_AUTHOR("Bin Han <bin.b.han@intel.com>");
MODULE_AUTHOR("Renwei Wu <renwei.wu@intel.com>");
MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Xia Wu <xia.wu@intel.com>");
MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang@intel.com>");
MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel ipu processing system driver");