1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2013 - 2020 Intel Corporation
4 #include <linux/debugfs.h>
5 #include <linux/delay.h>
6 #include <linux/device.h>
7 #include <linux/dma-buf.h>
8 #include <linux/firmware.h>
10 #include <linux/highmem.h>
11 #include <linux/init_task.h>
12 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/version.h>
17 #include <linux/poll.h>
18 #include <uapi/linux/sched/types.h>
19 #include <linux/uaccess.h>
20 #include <linux/vmalloc.h>
21 #include <linux/dma-mapping.h>
23 #include <uapi/linux/ipu-psys.h>
28 #include "ipu-platform.h"
29 #include "ipu-buttress.h"
31 #include "ipu-fw-psys.h"
33 #include "ipu-platform-psys.h"
34 #include "ipu-platform-regs.h"
35 #include "ipu-fw-com.h"
37 static bool async_fw_init
;
38 module_param(async_fw_init
, bool, 0664);
39 MODULE_PARM_DESC(async_fw_init
, "Enable asynchronous firmware initialization");
41 #define IPU_PSYS_NUM_DEVICES 4
42 #define IPU_PSYS_AUTOSUSPEND_DELAY 2000
45 static int psys_runtime_pm_resume(struct device
*dev
);
46 static int psys_runtime_pm_suspend(struct device
*dev
);
/*
 * NOTE(review): these stubs compile all runtime-PM autosuspend calls out
 * (get/put become no-ops returning 0).  They are presumably guarded by a
 * kernel-version or config #if that is not visible in this chunk — confirm
 * before relying on runtime-PM behavior in this build.
 */
#define pm_runtime_dont_use_autosuspend(d)
#define pm_runtime_use_autosuspend(d)
#define pm_runtime_set_autosuspend_delay(d, f) 0
#define pm_runtime_get_sync(d) 0
#define pm_runtime_put(d) 0
#define pm_runtime_put_sync(d) 0
#define pm_runtime_put_noidle(d) 0
#define pm_runtime_put_autosuspend(d) 0
58 static dev_t ipu_psys_dev_t
;
59 static DECLARE_BITMAP(ipu_psys_devices
, IPU_PSYS_NUM_DEVICES
);
60 static DEFINE_MUTEX(ipu_psys_mutex
);
62 static struct fw_init_task
{
63 struct delayed_work work
;
64 struct ipu_psys
*psys
;
67 static void ipu_psys_remove(struct ipu_bus_device
*adev
);
69 static struct bus_type ipu_psys_bus
= {
70 .name
= IPU_PSYS_NAME
,
73 struct ipu_psys_pg
*__get_pg_buf(struct ipu_psys
*psys
, size_t pg_size
)
75 struct ipu_psys_pg
*kpg
;
78 spin_lock_irqsave(&psys
->pgs_lock
, flags
);
79 list_for_each_entry(kpg
, &psys
->pgs
, list
) {
80 if (!kpg
->pg_size
&& kpg
->size
>= pg_size
) {
81 kpg
->pg_size
= pg_size
;
82 spin_unlock_irqrestore(&psys
->pgs_lock
, flags
);
86 spin_unlock_irqrestore(&psys
->pgs_lock
, flags
);
87 /* no big enough buffer available, allocate new one */
88 kpg
= kzalloc(sizeof(*kpg
), GFP_KERNEL
);
92 kpg
->pg
= dma_alloc_attrs(&psys
->adev
->dev
, pg_size
,
93 &kpg
->pg_dma_addr
, GFP_KERNEL
, 0);
99 kpg
->pg_size
= pg_size
;
101 spin_lock_irqsave(&psys
->pgs_lock
, flags
);
102 list_add(&kpg
->list
, &psys
->pgs
);
103 spin_unlock_irqrestore(&psys
->pgs_lock
, flags
);
108 static int ipu_psys_unmapbuf_locked(int fd
, struct ipu_psys_fh
*fh
,
109 struct ipu_psys_kbuffer
*kbuf
);
110 struct ipu_psys_kbuffer
*ipu_psys_lookup_kbuffer(struct ipu_psys_fh
*fh
, int fd
)
112 struct ipu_psys_kbuffer
*kbuf
;
114 list_for_each_entry(kbuf
, &fh
->bufmap
, list
) {
122 struct ipu_psys_kbuffer
*
123 ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh
*fh
, void *kaddr
)
125 struct ipu_psys_kbuffer
*kbuffer
;
127 list_for_each_entry(kbuffer
, &fh
->bufmap
, list
) {
128 if (kbuffer
->kaddr
== kaddr
)
135 static int ipu_psys_get_userpages(struct ipu_dma_buf_attach
*attach
)
137 struct vm_area_struct
*vma
;
138 unsigned long start
, end
;
139 int npages
, array_size
;
141 struct sg_table
*sgt
;
145 start
= (unsigned long)attach
->userptr
;
146 end
= PAGE_ALIGN(start
+ attach
->len
);
147 npages
= (end
- (start
& PAGE_MASK
)) >> PAGE_SHIFT
;
148 array_size
= npages
* sizeof(struct page
*);
150 sgt
= kzalloc(sizeof(*sgt
), GFP_KERNEL
);
154 if (attach
->npages
!= 0) {
155 pages
= attach
->pages
;
156 npages
= attach
->npages
;
157 attach
->vma_is_io
= 1;
161 pages
= kvzalloc(array_size
, GFP_KERNEL
);
165 mmap_read_lock(current
->mm
);
166 vma
= find_vma(current
->mm
, start
);
173 * For buffers from Gralloc, VM_PFNMAP is expected,
174 * but VM_IO is set. Possibly bug in Gralloc.
176 attach
->vma_is_io
= vma
->vm_flags
& (VM_IO
| VM_PFNMAP
);
178 if (attach
->vma_is_io
) {
179 unsigned long io_start
= start
;
181 if (vma
->vm_end
< start
+ attach
->len
) {
183 "vma at %lu is too small for %llu bytes\n",
189 for (nr
= 0; nr
< npages
; nr
++, io_start
+= PAGE_SIZE
) {
192 ret
= follow_pfn(vma
, io_start
, &pfn
);
195 pages
[nr
] = pfn_to_page(pfn
);
198 nr
= get_user_pages(start
& PAGE_MASK
, npages
,
204 mmap_read_unlock(current
->mm
);
206 attach
->pages
= pages
;
207 attach
->npages
= npages
;
210 ret
= sg_alloc_table_from_pages(sgt
, pages
, npages
,
211 start
& ~PAGE_MASK
, attach
->len
,
221 mmap_read_unlock(current
->mm
);
223 if (!attach
->vma_is_io
)
225 put_page(pages
[--nr
]);
227 if (array_size
<= PAGE_SIZE
)
234 dev_err(attach
->dev
, "failed to get userpages:%d\n", ret
);
239 static void ipu_psys_put_userpages(struct ipu_dma_buf_attach
*attach
)
241 if (!attach
|| !attach
->userptr
|| !attach
->sgt
)
244 if (!attach
->vma_is_io
) {
245 int i
= attach
->npages
;
248 set_page_dirty_lock(attach
->pages
[i
]);
249 put_page(attach
->pages
[i
]);
253 kvfree(attach
->pages
);
255 sg_free_table(attach
->sgt
);
260 static int ipu_dma_buf_attach(struct dma_buf
*dbuf
,
261 struct dma_buf_attachment
*attach
)
263 struct ipu_psys_kbuffer
*kbuf
= dbuf
->priv
;
264 struct ipu_dma_buf_attach
*ipu_attach
;
266 ipu_attach
= kzalloc(sizeof(*ipu_attach
), GFP_KERNEL
);
270 ipu_attach
->len
= kbuf
->len
;
271 ipu_attach
->userptr
= kbuf
->userptr
;
273 attach
->priv
= ipu_attach
;
277 static void ipu_dma_buf_detach(struct dma_buf
*dbuf
,
278 struct dma_buf_attachment
*attach
)
280 struct ipu_dma_buf_attach
*ipu_attach
= attach
->priv
;
286 static struct sg_table
*ipu_dma_buf_map(struct dma_buf_attachment
*attach
,
287 enum dma_data_direction dir
)
289 struct ipu_dma_buf_attach
*ipu_attach
= attach
->priv
;
293 ret
= ipu_psys_get_userpages(ipu_attach
);
297 attrs
= DMA_ATTR_SKIP_CPU_SYNC
;
298 ret
= dma_map_sg_attrs(attach
->dev
, ipu_attach
->sgt
->sgl
,
299 ipu_attach
->sgt
->orig_nents
, dir
, attrs
);
300 if (ret
< ipu_attach
->sgt
->orig_nents
) {
301 ipu_psys_put_userpages(ipu_attach
);
302 dev_dbg(attach
->dev
, "buf map failed\n");
304 return ERR_PTR(-EIO
);
308 * Initial cache flush to avoid writing dirty pages for buffers which
309 * are later marked as IPU_BUFFER_FLAG_NO_FLUSH.
311 dma_sync_sg_for_device(attach
->dev
, ipu_attach
->sgt
->sgl
,
312 ipu_attach
->sgt
->orig_nents
, DMA_BIDIRECTIONAL
);
314 return ipu_attach
->sgt
;
317 static void ipu_dma_buf_unmap(struct dma_buf_attachment
*attach
,
318 struct sg_table
*sg
, enum dma_data_direction dir
)
320 struct ipu_dma_buf_attach
*ipu_attach
= attach
->priv
;
322 dma_unmap_sg(attach
->dev
, sg
->sgl
, sg
->orig_nents
, dir
);
323 ipu_psys_put_userpages(ipu_attach
);
326 static int ipu_dma_buf_mmap(struct dma_buf
*dbuf
, struct vm_area_struct
*vma
)
331 static void ipu_dma_buf_release(struct dma_buf
*buf
)
333 struct ipu_psys_kbuffer
*kbuf
= buf
->priv
;
338 if (kbuf
->db_attach
) {
339 dev_dbg(kbuf
->db_attach
->dev
,
340 "releasing buffer %d\n", kbuf
->fd
);
341 ipu_psys_put_userpages(kbuf
->db_attach
->priv
);
346 static int ipu_dma_buf_begin_cpu_access(struct dma_buf
*dma_buf
,
347 enum dma_data_direction dir
)
352 static int ipu_dma_buf_vmap(struct dma_buf
*dmabuf
, struct dma_buf_map
*map
)
354 struct dma_buf_attachment
*attach
;
355 struct ipu_dma_buf_attach
*ipu_attach
;
357 if (list_empty(&dmabuf
->attachments
))
360 attach
= list_last_entry(&dmabuf
->attachments
,
361 struct dma_buf_attachment
, node
);
362 ipu_attach
= attach
->priv
;
364 if (!ipu_attach
|| !ipu_attach
->pages
|| !ipu_attach
->npages
)
367 map
->vaddr
= vm_map_ram(ipu_attach
->pages
, ipu_attach
->npages
, 0);
368 map
->is_iomem
= false;
375 static void ipu_dma_buf_vunmap(struct dma_buf
*dmabuf
, struct dma_buf_map
*map
)
377 struct dma_buf_attachment
*attach
;
378 struct ipu_dma_buf_attach
*ipu_attach
;
380 if (WARN_ON(list_empty(&dmabuf
->attachments
)))
383 attach
= list_last_entry(&dmabuf
->attachments
,
384 struct dma_buf_attachment
, node
);
385 ipu_attach
= attach
->priv
;
387 if (WARN_ON(!ipu_attach
|| !ipu_attach
->pages
|| !ipu_attach
->npages
))
390 vm_unmap_ram(map
->vaddr
, ipu_attach
->npages
);
393 struct dma_buf_ops ipu_dma_buf_ops
= {
394 .attach
= ipu_dma_buf_attach
,
395 .detach
= ipu_dma_buf_detach
,
396 .map_dma_buf
= ipu_dma_buf_map
,
397 .unmap_dma_buf
= ipu_dma_buf_unmap
,
398 .release
= ipu_dma_buf_release
,
399 .begin_cpu_access
= ipu_dma_buf_begin_cpu_access
,
400 .mmap
= ipu_dma_buf_mmap
,
401 .vmap
= ipu_dma_buf_vmap
,
402 .vunmap
= ipu_dma_buf_vunmap
,
405 static int ipu_psys_open(struct inode
*inode
, struct file
*file
)
407 struct ipu_psys
*psys
= inode_to_ipu_psys(inode
);
408 struct ipu_device
*isp
= psys
->adev
->isp
;
409 struct ipu_psys_fh
*fh
;
415 fh
= kzalloc(sizeof(*fh
), GFP_KERNEL
);
421 file
->private_data
= fh
;
423 mutex_init(&fh
->mutex
);
424 INIT_LIST_HEAD(&fh
->bufmap
);
425 init_waitqueue_head(&fh
->wait
);
427 rval
= ipu_psys_fh_init(fh
);
431 mutex_lock(&psys
->mutex
);
432 list_add_tail(&fh
->list
, &psys
->fhs
);
433 mutex_unlock(&psys
->mutex
);
438 mutex_destroy(&fh
->mutex
);
443 static inline void ipu_psys_kbuf_unmap(struct ipu_psys_kbuffer
*kbuf
)
450 struct dma_buf_map dmap
;
452 dma_buf_map_set_vaddr(&dmap
, kbuf
->kaddr
);
453 dma_buf_vunmap(kbuf
->dbuf
, &dmap
);
456 dma_buf_unmap_attachment(kbuf
->db_attach
,
460 dma_buf_detach(kbuf
->dbuf
, kbuf
->db_attach
);
461 dma_buf_put(kbuf
->dbuf
);
463 kbuf
->db_attach
= NULL
;
468 static int ipu_psys_release(struct inode
*inode
, struct file
*file
)
470 struct ipu_psys
*psys
= inode_to_ipu_psys(inode
);
471 struct ipu_psys_fh
*fh
= file
->private_data
;
472 struct ipu_psys_kbuffer
*kbuf
, *kbuf0
;
473 struct dma_buf_attachment
*db_attach
;
475 mutex_lock(&fh
->mutex
);
476 /* clean up buffers */
477 if (!list_empty(&fh
->bufmap
)) {
478 list_for_each_entry_safe(kbuf
, kbuf0
, &fh
->bufmap
, list
) {
479 list_del(&kbuf
->list
);
480 db_attach
= kbuf
->db_attach
;
482 /* Unmap and release buffers */
483 if (kbuf
->dbuf
&& db_attach
) {
485 ipu_psys_kbuf_unmap(kbuf
);
488 ipu_psys_put_userpages(db_attach
->priv
);
493 mutex_unlock(&fh
->mutex
);
495 mutex_lock(&psys
->mutex
);
498 mutex_unlock(&psys
->mutex
);
499 ipu_psys_fh_deinit(fh
);
501 mutex_lock(&psys
->mutex
);
502 if (list_empty(&psys
->fhs
))
503 psys
->power_gating
= 0;
504 mutex_unlock(&psys
->mutex
);
505 mutex_destroy(&fh
->mutex
);
511 static int ipu_psys_getbuf(struct ipu_psys_buffer
*buf
, struct ipu_psys_fh
*fh
)
513 struct ipu_psys_kbuffer
*kbuf
;
514 struct ipu_psys
*psys
= fh
->psys
;
516 DEFINE_DMA_BUF_EXPORT_INFO(exp_info
);
517 struct dma_buf
*dbuf
;
520 if (!buf
->base
.userptr
) {
521 dev_err(&psys
->adev
->dev
, "Buffer allocation not supported\n");
525 kbuf
= kzalloc(sizeof(*kbuf
), GFP_KERNEL
);
529 kbuf
->len
= buf
->len
;
530 kbuf
->userptr
= buf
->base
.userptr
;
531 kbuf
->flags
= buf
->flags
;
533 exp_info
.ops
= &ipu_dma_buf_ops
;
534 exp_info
.size
= kbuf
->len
;
535 exp_info
.flags
= O_RDWR
;
536 exp_info
.priv
= kbuf
;
538 dbuf
= dma_buf_export(&exp_info
);
541 return PTR_ERR(dbuf
);
544 ret
= dma_buf_fd(dbuf
, 0);
553 kbuf
->flags
= buf
->flags
&= ~IPU_BUFFER_FLAG_USERPTR
;
554 kbuf
->flags
= buf
->flags
|= IPU_BUFFER_FLAG_DMA_HANDLE
;
556 mutex_lock(&fh
->mutex
);
557 list_add(&kbuf
->list
, &fh
->bufmap
);
558 mutex_unlock(&fh
->mutex
);
560 dev_dbg(&psys
->adev
->dev
, "IOC_GETBUF: userptr %p size %llu to fd %d",
561 buf
->base
.userptr
, buf
->len
, buf
->base
.fd
);
/* IPU_IOC_PUTBUF: nothing to do — buffers are released via UNMAPBUF/close. */
static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh)
{
	return 0;
}
571 int ipu_psys_mapbuf_locked(int fd
, struct ipu_psys_fh
*fh
,
572 struct ipu_psys_kbuffer
*kbuf
)
574 struct ipu_psys
*psys
= fh
->psys
;
575 struct dma_buf
*dbuf
;
576 struct dma_buf_map dmap
;
579 dbuf
= dma_buf_get(fd
);
584 /* This fd isn't generated by ipu_psys_getbuf, it
585 * is a new fd. Create a new kbuf item for this fd, and
586 * add this kbuf to bufmap list.
588 kbuf
= kzalloc(sizeof(*kbuf
), GFP_KERNEL
);
594 list_add(&kbuf
->list
, &fh
->bufmap
);
597 /* fd valid and found, need remap */
598 if (kbuf
->dbuf
&& (kbuf
->dbuf
!= dbuf
|| kbuf
->len
!= dbuf
->size
)) {
599 dev_dbg(&psys
->adev
->dev
,
600 "dmabuf fd %d with kbuf %p changed, need remap.\n",
602 ret
= ipu_psys_unmapbuf_locked(fd
, fh
, kbuf
);
606 kbuf
= ipu_psys_lookup_kbuffer(fh
, fd
);
607 /* changed external dmabuf */
609 kbuf
= kzalloc(sizeof(*kbuf
), GFP_KERNEL
);
614 list_add(&kbuf
->list
, &fh
->bufmap
);
619 dev_dbg(&psys
->adev
->dev
, "fd %d has been mapped!\n", fd
);
627 kbuf
->len
= kbuf
->dbuf
->size
;
631 kbuf
->db_attach
= dma_buf_attach(kbuf
->dbuf
, &psys
->adev
->dev
);
632 if (IS_ERR(kbuf
->db_attach
)) {
633 ret
= PTR_ERR(kbuf
->db_attach
);
634 dev_dbg(&psys
->adev
->dev
, "dma buf attach failed\n");
638 kbuf
->sgt
= dma_buf_map_attachment(kbuf
->db_attach
, DMA_BIDIRECTIONAL
);
639 if (IS_ERR_OR_NULL(kbuf
->sgt
)) {
642 dev_dbg(&psys
->adev
->dev
, "dma buf map attachment failed\n");
646 kbuf
->dma_addr
= sg_dma_address(kbuf
->sgt
->sgl
);
648 ret
= dma_buf_vmap(kbuf
->dbuf
, &dmap
);
650 dev_dbg(&psys
->adev
->dev
, "dma buf vmap failed\n");
653 kbuf
->kaddr
= dmap
.vaddr
;
655 dev_dbg(&psys
->adev
->dev
, "%s kbuf %p fd %d with len %llu mapped\n",
656 __func__
, kbuf
, fd
, kbuf
->len
);
664 ipu_psys_kbuf_unmap(kbuf
);
666 list_del(&kbuf
->list
);
674 dev_err(&psys
->adev
->dev
, "%s failed for fd %d\n", __func__
, fd
);
678 static long ipu_psys_mapbuf(int fd
, struct ipu_psys_fh
*fh
)
681 struct ipu_psys_kbuffer
*kbuf
;
683 mutex_lock(&fh
->mutex
);
684 kbuf
= ipu_psys_lookup_kbuffer(fh
, fd
);
685 ret
= ipu_psys_mapbuf_locked(fd
, fh
, kbuf
);
686 mutex_unlock(&fh
->mutex
);
688 dev_dbg(&fh
->psys
->adev
->dev
, "IOC_MAPBUF ret %ld\n", ret
);
693 static int ipu_psys_unmapbuf_locked(int fd
, struct ipu_psys_fh
*fh
,
694 struct ipu_psys_kbuffer
*kbuf
)
696 struct ipu_psys
*psys
= fh
->psys
;
698 if (!kbuf
|| fd
!= kbuf
->fd
) {
699 dev_err(&psys
->adev
->dev
, "invalid kbuffer\n");
703 /* From now on it is not safe to use this kbuffer */
704 ipu_psys_kbuf_unmap(kbuf
);
706 list_del(&kbuf
->list
);
711 dev_dbg(&psys
->adev
->dev
, "%s fd %d unmapped\n", __func__
, fd
);
716 static long ipu_psys_unmapbuf(int fd
, struct ipu_psys_fh
*fh
)
718 struct ipu_psys_kbuffer
*kbuf
;
721 mutex_lock(&fh
->mutex
);
722 kbuf
= ipu_psys_lookup_kbuffer(fh
, fd
);
724 dev_err(&fh
->psys
->adev
->dev
,
725 "buffer with fd %d not found\n", fd
);
726 mutex_unlock(&fh
->mutex
);
729 ret
= ipu_psys_unmapbuf_locked(fd
, fh
, kbuf
);
730 mutex_unlock(&fh
->mutex
);
732 dev_dbg(&fh
->psys
->adev
->dev
, "IOC_UNMAPBUF\n");
737 static unsigned int ipu_psys_poll(struct file
*file
,
738 struct poll_table_struct
*wait
)
740 struct ipu_psys_fh
*fh
= file
->private_data
;
741 struct ipu_psys
*psys
= fh
->psys
;
742 unsigned int res
= 0;
744 dev_dbg(&psys
->adev
->dev
, "ipu psys poll\n");
746 poll_wait(file
, &fh
->wait
, wait
);
748 if (ipu_get_completed_kcmd(fh
))
751 dev_dbg(&psys
->adev
->dev
, "ipu psys poll res %u\n", res
);
756 static long ipu_get_manifest(struct ipu_psys_manifest
*manifest
,
757 struct ipu_psys_fh
*fh
)
759 struct ipu_psys
*psys
= fh
->psys
;
760 struct ipu_device
*isp
= psys
->adev
->isp
;
761 struct ipu_cpd_client_pkg_hdr
*client_pkg
;
764 dma_addr_t dma_fw_data
;
765 u32 client_pkg_offset
;
767 host_fw_data
= (void *)isp
->cpd_fw
->data
;
768 dma_fw_data
= sg_dma_address(psys
->fw_sgt
.sgl
);
770 entries
= ipu_cpd_pkg_dir_get_num_entries(psys
->pkg_dir
);
771 if (!manifest
|| manifest
->index
> entries
- 1) {
772 dev_err(&psys
->adev
->dev
, "invalid argument\n");
776 if (!ipu_cpd_pkg_dir_get_size(psys
->pkg_dir
, manifest
->index
) ||
777 ipu_cpd_pkg_dir_get_type(psys
->pkg_dir
, manifest
->index
) <
778 IPU_CPD_PKG_DIR_CLIENT_PG_TYPE
) {
779 dev_dbg(&psys
->adev
->dev
, "invalid pkg dir entry\n");
783 client_pkg_offset
= ipu_cpd_pkg_dir_get_address(psys
->pkg_dir
,
785 client_pkg_offset
-= dma_fw_data
;
787 client_pkg
= host_fw_data
+ client_pkg_offset
;
788 manifest
->size
= client_pkg
->pg_manifest_size
;
790 if (!manifest
->manifest
)
793 if (copy_to_user(manifest
->manifest
,
794 (uint8_t *)client_pkg
+ client_pkg
->pg_manifest_offs
,
802 static long ipu_psys_ioctl(struct file
*file
, unsigned int cmd
,
806 struct ipu_psys_buffer buf
;
807 struct ipu_psys_command cmd
;
808 struct ipu_psys_event ev
;
809 struct ipu_psys_capability caps
;
810 struct ipu_psys_manifest m
;
812 struct ipu_psys_fh
*fh
= file
->private_data
;
814 void __user
*up
= (void __user
*)arg
;
815 bool copy
= (cmd
!= IPU_IOC_MAPBUF
&& cmd
!= IPU_IOC_UNMAPBUF
);
818 if (_IOC_SIZE(cmd
) > sizeof(karg
))
821 if (_IOC_DIR(cmd
) & _IOC_WRITE
) {
822 err
= copy_from_user(&karg
, up
, _IOC_SIZE(cmd
));
830 err
= ipu_psys_mapbuf(arg
, fh
);
832 case IPU_IOC_UNMAPBUF
:
833 err
= ipu_psys_unmapbuf(arg
, fh
);
835 case IPU_IOC_QUERYCAP
:
836 karg
.caps
= fh
->psys
->caps
;
839 err
= ipu_psys_getbuf(&karg
.buf
, fh
);
842 err
= ipu_psys_putbuf(&karg
.buf
, fh
);
845 err
= ipu_psys_kcmd_new(&karg
.cmd
, fh
);
847 case IPU_IOC_DQEVENT
:
848 err
= ipu_ioctl_dqevent(&karg
.ev
, fh
, file
->f_flags
);
850 case IPU_IOC_GET_MANIFEST
:
851 err
= ipu_get_manifest(&karg
.m
, fh
);
861 if (copy
&& _IOC_DIR(cmd
) & _IOC_READ
)
862 if (copy_to_user(up
, &karg
, _IOC_SIZE(cmd
)))
868 static const struct file_operations ipu_psys_fops
= {
869 .open
= ipu_psys_open
,
870 .release
= ipu_psys_release
,
871 .unlocked_ioctl
= ipu_psys_ioctl
,
873 .compat_ioctl
= ipu_psys_compat_ioctl32
,
875 .poll
= ipu_psys_poll
,
876 .owner
= THIS_MODULE
,
/* Device-model release hook; psys memory is devm-managed, nothing to free. */
static void ipu_psys_dev_release(struct device *dev)
{
}
884 static int psys_runtime_pm_resume(struct device
*dev
)
886 struct ipu_bus_device
*adev
= to_ipu_bus_device(dev
);
887 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
895 * In runtime autosuspend mode, if the psys is in power on state, no
896 * need to resume again.
898 spin_lock_irqsave(&psys
->ready_lock
, flags
);
900 spin_unlock_irqrestore(&psys
->ready_lock
, flags
);
903 spin_unlock_irqrestore(&psys
->ready_lock
, flags
);
905 retval
= ipu_mmu_hw_init(adev
->mmu
);
909 if (async_fw_init
&& !psys
->fwcom
) {
911 "%s: asynchronous firmware init not finished, skipping\n",
916 if (!ipu_buttress_auth_done(adev
->isp
)) {
917 dev_dbg(dev
, "%s: not yet authenticated, skipping\n", __func__
);
921 ipu_psys_setup_hw(psys
);
923 ipu_psys_subdomains_power(psys
, 1);
924 ipu_trace_restore(&psys
->adev
->dev
);
926 ipu_configure_spc(adev
->isp
,
927 &psys
->pdata
->ipdata
->hw_variant
,
928 IPU_CPD_PKG_DIR_PSYS_SERVER_IDX
,
929 psys
->pdata
->base
, psys
->pkg_dir
,
930 psys
->pkg_dir_dma_addr
);
932 retval
= ipu_fw_psys_open(psys
);
934 dev_err(&psys
->adev
->dev
, "Failed to open abi.\n");
938 spin_lock_irqsave(&psys
->ready_lock
, flags
);
940 spin_unlock_irqrestore(&psys
->ready_lock
, flags
);
945 static int psys_runtime_pm_suspend(struct device
*dev
)
947 struct ipu_bus_device
*adev
= to_ipu_bus_device(dev
);
948 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
958 spin_lock_irqsave(&psys
->ready_lock
, flags
);
960 spin_unlock_irqrestore(&psys
->ready_lock
, flags
);
963 * We can trace failure but better to not return an error.
964 * At suspend we are progressing towards psys power gated state.
965 * Any hang / failure inside psys will be forgotten soon.
967 rval
= ipu_fw_psys_close(psys
);
969 dev_err(dev
, "Device close failure: %d\n", rval
);
971 ipu_psys_subdomains_power(psys
, 0);
973 ipu_mmu_hw_cleanup(adev
->mmu
);
978 /* The following PM callbacks are needed to enable runtime PM in IPU PCI
979 * device resume, otherwise, runtime PM can't work in PCI resume from
/* System resume: no-op; real work happens in runtime-PM resume. */
static int psys_resume(struct device *dev)
{
	return 0;
}
/* System suspend: no-op; real work happens in runtime-PM suspend. */
static int psys_suspend(struct device *dev)
{
	return 0;
}
992 static const struct dev_pm_ops psys_pm_ops
= {
993 .runtime_suspend
= psys_runtime_pm_suspend
,
994 .runtime_resume
= psys_runtime_pm_resume
,
995 .suspend
= psys_suspend
,
996 .resume
= psys_resume
,
999 #define PSYS_PM_OPS (&psys_pm_ops)
1001 #define PSYS_PM_OPS NULL
1004 static int cpd_fw_reload(struct ipu_device
*isp
)
1006 struct ipu_psys
*psys
= ipu_bus_get_drvdata(isp
->psys
);
1009 if (!isp
->secure_mode
) {
1010 dev_warn(&isp
->pdev
->dev
,
1011 "CPD firmware reload was only supported for secure mode.\n");
1016 ipu_cpd_free_pkg_dir(isp
->psys
, psys
->pkg_dir
,
1017 psys
->pkg_dir_dma_addr
,
1018 psys
->pkg_dir_size
);
1020 ipu_buttress_unmap_fw_image(isp
->psys
, &psys
->fw_sgt
);
1021 release_firmware(isp
->cpd_fw
);
1023 dev_info(&isp
->pdev
->dev
, "Old FW removed\n");
1026 rval
= request_cpd_fw(&isp
->cpd_fw
, isp
->cpd_fw_name
,
1029 dev_err(&isp
->pdev
->dev
, "Requesting firmware(%s) failed\n",
1034 rval
= ipu_cpd_validate_cpd_file(isp
, isp
->cpd_fw
->data
,
1037 dev_err(&isp
->pdev
->dev
, "Failed to validate cpd file\n");
1038 goto out_release_firmware
;
1041 rval
= ipu_buttress_map_fw_image(isp
->psys
, isp
->cpd_fw
, &psys
->fw_sgt
);
1043 goto out_release_firmware
;
1045 psys
->pkg_dir
= ipu_cpd_create_pkg_dir(isp
->psys
,
1047 sg_dma_address(psys
->fw_sgt
.sgl
),
1048 &psys
->pkg_dir_dma_addr
,
1049 &psys
->pkg_dir_size
);
1051 if (!psys
->pkg_dir
) {
1053 goto out_unmap_fw_image
;
1056 isp
->pkg_dir
= psys
->pkg_dir
;
1057 isp
->pkg_dir_dma_addr
= psys
->pkg_dir_dma_addr
;
1058 isp
->pkg_dir_size
= psys
->pkg_dir_size
;
1060 if (!isp
->secure_mode
)
1063 rval
= ipu_fw_authenticate(isp
, 1);
1065 goto out_free_pkg_dir
;
1070 ipu_cpd_free_pkg_dir(isp
->psys
, psys
->pkg_dir
,
1071 psys
->pkg_dir_dma_addr
, psys
->pkg_dir_size
);
1073 ipu_buttress_unmap_fw_image(isp
->psys
, &psys
->fw_sgt
);
1074 out_release_firmware
:
1075 release_firmware(isp
->cpd_fw
);
1081 #ifdef CONFIG_DEBUG_FS
1082 static int ipu_psys_icache_prefetch_sp_get(void *data
, u64
*val
)
1084 struct ipu_psys
*psys
= data
;
1086 *val
= psys
->icache_prefetch_sp
;
1090 static int ipu_psys_icache_prefetch_sp_set(void *data
, u64 val
)
1092 struct ipu_psys
*psys
= data
;
1097 psys
->icache_prefetch_sp
= val
;
1102 DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_sp_fops
,
1103 ipu_psys_icache_prefetch_sp_get
,
1104 ipu_psys_icache_prefetch_sp_set
, "%llu\n");
1106 static int ipu_psys_icache_prefetch_isp_get(void *data
, u64
*val
)
1108 struct ipu_psys
*psys
= data
;
1110 *val
= psys
->icache_prefetch_isp
;
1114 static int ipu_psys_icache_prefetch_isp_set(void *data
, u64 val
)
1116 struct ipu_psys
*psys
= data
;
1121 psys
->icache_prefetch_isp
= val
;
1126 DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_isp_fops
,
1127 ipu_psys_icache_prefetch_isp_get
,
1128 ipu_psys_icache_prefetch_isp_set
, "%llu\n");
1130 static int ipu_psys_init_debugfs(struct ipu_psys
*psys
)
1132 struct dentry
*file
;
1135 dir
= debugfs_create_dir("psys", psys
->adev
->isp
->ipu_dir
);
1139 file
= debugfs_create_file("icache_prefetch_sp", 0600,
1140 dir
, psys
, &psys_icache_prefetch_sp_fops
);
1144 file
= debugfs_create_file("icache_prefetch_isp", 0600,
1145 dir
, psys
, &psys_icache_prefetch_isp_fops
);
1149 psys
->debugfsdir
= dir
;
1152 if (ipu_psys_gpc_init_debugfs(psys
))
1158 debugfs_remove_recursive(dir
);
1163 static int ipu_psys_sched_cmd(void *ptr
)
1165 struct ipu_psys
*psys
= ptr
;
1169 wait_event_interruptible(psys
->sched_cmd_wq
,
1170 (kthread_should_stop() ||
1172 atomic_read(&psys
->wakeup_count
))));
1174 if (kthread_should_stop())
1180 mutex_lock(&psys
->mutex
);
1181 atomic_set(&psys
->wakeup_count
, 0);
1182 ipu_psys_run_next(psys
);
1183 mutex_unlock(&psys
->mutex
);
1189 static void start_sp(struct ipu_bus_device
*adev
)
1191 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
1192 void __iomem
*spc_regs_base
= psys
->pdata
->base
+
1193 psys
->pdata
->ipdata
->hw_variant
.spc_offset
;
1196 val
|= IPU_PSYS_SPC_STATUS_START
|
1197 IPU_PSYS_SPC_STATUS_RUN
|
1198 IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE
;
1199 val
|= psys
->icache_prefetch_sp
?
1200 IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH
: 0;
1201 writel(val
, spc_regs_base
+ IPU_PSYS_REG_SPC_STATUS_CTRL
);
1204 static int query_sp(struct ipu_bus_device
*adev
)
1206 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
1207 void __iomem
*spc_regs_base
= psys
->pdata
->base
+
1208 psys
->pdata
->ipdata
->hw_variant
.spc_offset
;
1209 u32 val
= readl(spc_regs_base
+ IPU_PSYS_REG_SPC_STATUS_CTRL
);
1211 /* return true when READY == 1, START == 0 */
1212 val
&= IPU_PSYS_SPC_STATUS_READY
| IPU_PSYS_SPC_STATUS_START
;
1214 return val
== IPU_PSYS_SPC_STATUS_READY
;
1217 static int ipu_psys_fw_init(struct ipu_psys
*psys
)
1220 struct ipu_fw_syscom_queue_config
*queue_cfg
;
1221 struct ipu_fw_syscom_queue_config fw_psys_event_queue_cfg
[] = {
1223 IPU_FW_PSYS_EVENT_QUEUE_SIZE
,
1224 sizeof(struct ipu_fw_psys_event
)
1227 struct ipu_fw_psys_srv_init server_init
= {
1228 .ddr_pkg_dir_address
= 0,
1229 .host_ddr_pkg_dir
= NULL
,
1231 .icache_prefetch_sp
= psys
->icache_prefetch_sp
,
1232 .icache_prefetch_isp
= psys
->icache_prefetch_isp
,
1234 struct ipu_fw_com_cfg fwcom
= {
1235 .num_output_queues
= IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID
,
1236 .output
= fw_psys_event_queue_cfg
,
1237 .specific_addr
= &server_init
,
1238 .specific_size
= sizeof(server_init
),
1239 .cell_start
= start_sp
,
1240 .cell_ready
= query_sp
,
1241 .buttress_boot_offset
= SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET
,
1245 size
= IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID
;
1246 if (ipu_ver
== IPU_VER_6
|| ipu_ver
== IPU_VER_6EP
)
1247 size
= IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID
;
1249 queue_cfg
= devm_kzalloc(&psys
->adev
->dev
, sizeof(*queue_cfg
) * size
,
1254 for (i
= 0; i
< size
; i
++) {
1255 queue_cfg
[i
].queue_size
= IPU_FW_PSYS_CMD_QUEUE_SIZE
;
1256 queue_cfg
[i
].token_size
= sizeof(struct ipu_fw_psys_cmd
);
1259 fwcom
.input
= queue_cfg
;
1260 fwcom
.num_input_queues
= size
;
1261 fwcom
.dmem_addr
= psys
->pdata
->ipdata
->hw_variant
.dmem_offset
;
1263 psys
->fwcom
= ipu_fw_com_prepare(&fwcom
, psys
->adev
, psys
->pdata
->base
);
1265 dev_err(&psys
->adev
->dev
, "psys fw com prepare failed\n");
1272 static void run_fw_init_work(struct work_struct
*work
)
1274 struct fw_init_task
*task
= (struct fw_init_task
*)work
;
1275 struct ipu_psys
*psys
= task
->psys
;
1278 rval
= ipu_psys_fw_init(psys
);
1281 dev_err(&psys
->adev
->dev
, "FW init failed(%d)\n", rval
);
1282 ipu_psys_remove(psys
->adev
);
1284 dev_info(&psys
->adev
->dev
, "FW init done\n");
1288 static int ipu_psys_probe(struct ipu_bus_device
*adev
)
1290 struct ipu_device
*isp
= adev
->isp
;
1291 struct ipu_psys_pg
*kpg
, *kpg0
;
1292 struct ipu_psys
*psys
;
1294 int i
, rval
= -E2BIG
;
1296 rval
= ipu_mmu_hw_init(adev
->mmu
);
1300 mutex_lock(&ipu_psys_mutex
);
1302 minor
= find_next_zero_bit(ipu_psys_devices
, IPU_PSYS_NUM_DEVICES
, 0);
1303 if (minor
== IPU_PSYS_NUM_DEVICES
) {
1304 dev_err(&adev
->dev
, "too many devices\n");
1308 psys
= devm_kzalloc(&adev
->dev
, sizeof(*psys
), GFP_KERNEL
);
1315 psys
->pdata
= adev
->pdata
;
1316 psys
->icache_prefetch_sp
= 0;
1318 psys
->power_gating
= 0;
1320 ipu_trace_init(adev
->isp
, psys
->pdata
->base
, &adev
->dev
,
1323 cdev_init(&psys
->cdev
, &ipu_psys_fops
);
1324 psys
->cdev
.owner
= ipu_psys_fops
.owner
;
1326 rval
= cdev_add(&psys
->cdev
, MKDEV(MAJOR(ipu_psys_dev_t
), minor
), 1);
1328 dev_err(&adev
->dev
, "cdev_add failed (%d)\n", rval
);
1332 set_bit(minor
, ipu_psys_devices
);
1334 spin_lock_init(&psys
->ready_lock
);
1335 spin_lock_init(&psys
->pgs_lock
);
1337 psys
->timeout
= IPU_PSYS_CMD_TIMEOUT_MS
;
1339 mutex_init(&psys
->mutex
);
1340 INIT_LIST_HEAD(&psys
->fhs
);
1341 INIT_LIST_HEAD(&psys
->pgs
);
1342 INIT_LIST_HEAD(&psys
->started_kcmds_list
);
1343 INIT_WORK(&psys
->watchdog_work
, ipu_psys_watchdog_work
);
1345 init_waitqueue_head(&psys
->sched_cmd_wq
);
1346 atomic_set(&psys
->wakeup_count
, 0);
1348 * Create a thread to schedule commands sent to IPU firmware.
1349 * The thread reduces the coupling between the command scheduler
1350 * and queueing commands from the user to driver.
1352 psys
->sched_cmd_thread
= kthread_run(ipu_psys_sched_cmd
, psys
,
1355 if (IS_ERR(psys
->sched_cmd_thread
)) {
1356 psys
->sched_cmd_thread
= NULL
;
1357 mutex_destroy(&psys
->mutex
);
1361 ipu_bus_set_drvdata(adev
, psys
);
1363 rval
= ipu_psys_resource_pool_init(&psys
->resource_pool_started
);
1366 "unable to alloc process group resources\n");
1367 goto out_mutex_destroy
;
1370 rval
= ipu_psys_resource_pool_init(&psys
->resource_pool_running
);
1373 "unable to alloc process group resources\n");
1374 goto out_resources_started_free
;
1377 ipu6_psys_hw_res_variant_init();
1378 psys
->pkg_dir
= isp
->pkg_dir
;
1379 psys
->pkg_dir_dma_addr
= isp
->pkg_dir_dma_addr
;
1380 psys
->pkg_dir_size
= isp
->pkg_dir_size
;
1381 psys
->fw_sgt
= isp
->fw_sgt
;
1383 /* allocate and map memory for process groups */
1384 for (i
= 0; i
< IPU_PSYS_PG_POOL_SIZE
; i
++) {
1385 kpg
= kzalloc(sizeof(*kpg
), GFP_KERNEL
);
1388 kpg
->pg
= dma_alloc_attrs(&adev
->dev
,
1389 IPU_PSYS_PG_MAX_SIZE
,
1396 kpg
->size
= IPU_PSYS_PG_MAX_SIZE
;
1397 list_add(&kpg
->list
, &psys
->pgs
);
1400 psys
->caps
.pg_count
= ipu_cpd_pkg_dir_get_num_entries(psys
->pkg_dir
);
1402 dev_info(&adev
->dev
, "pkg_dir entry count:%d\n", psys
->caps
.pg_count
);
1403 if (async_fw_init
) {
1404 INIT_DELAYED_WORK((struct delayed_work
*)&fw_init_task
,
1406 fw_init_task
.psys
= psys
;
1407 schedule_delayed_work((struct delayed_work
*)&fw_init_task
, 0);
1409 rval
= ipu_psys_fw_init(psys
);
1413 dev_err(&adev
->dev
, "FW init failed(%d)\n", rval
);
1417 psys
->dev
.parent
= &adev
->dev
;
1418 psys
->dev
.bus
= &ipu_psys_bus
;
1419 psys
->dev
.devt
= MKDEV(MAJOR(ipu_psys_dev_t
), minor
);
1420 psys
->dev
.release
= ipu_psys_dev_release
;
1421 dev_set_name(&psys
->dev
, "ipu-psys%d", minor
);
1422 rval
= device_register(&psys
->dev
);
1424 dev_err(&psys
->dev
, "psys device_register failed\n");
1425 goto out_release_fw_com
;
1428 /* Add the hw stepping information to caps */
1429 strlcpy(psys
->caps
.dev_model
, IPU_MEDIA_DEV_MODEL_NAME
,
1430 sizeof(psys
->caps
.dev_model
));
1432 pm_runtime_set_autosuspend_delay(&psys
->adev
->dev
,
1433 IPU_PSYS_AUTOSUSPEND_DELAY
);
1434 pm_runtime_use_autosuspend(&psys
->adev
->dev
);
1435 pm_runtime_mark_last_busy(&psys
->adev
->dev
);
1437 mutex_unlock(&ipu_psys_mutex
);
1439 #ifdef CONFIG_DEBUG_FS
1440 /* Debug fs failure is not fatal. */
1441 ipu_psys_init_debugfs(psys
);
1444 adev
->isp
->cpd_fw_reload
= &cpd_fw_reload
;
1446 dev_info(&adev
->dev
, "psys probe minor: %d\n", minor
);
1448 ipu_mmu_hw_cleanup(adev
->mmu
);
1453 ipu_fw_com_release(psys
->fwcom
, 1);
1455 list_for_each_entry_safe(kpg
, kpg0
, &psys
->pgs
, list
) {
1456 dma_free_attrs(&adev
->dev
, kpg
->size
, kpg
->pg
,
1457 kpg
->pg_dma_addr
, 0);
1461 ipu_psys_resource_pool_cleanup(&psys
->resource_pool_running
);
1462 out_resources_started_free
:
1463 ipu_psys_resource_pool_cleanup(&psys
->resource_pool_started
);
1465 mutex_destroy(&psys
->mutex
);
1466 cdev_del(&psys
->cdev
);
1467 if (psys
->sched_cmd_thread
) {
1468 kthread_stop(psys
->sched_cmd_thread
);
1469 psys
->sched_cmd_thread
= NULL
;
1472 /* Safe to call even if the init is not called */
1473 ipu_trace_uninit(&adev
->dev
);
1474 mutex_unlock(&ipu_psys_mutex
);
1476 ipu_mmu_hw_cleanup(adev
->mmu
);
1481 static void ipu_psys_remove(struct ipu_bus_device
*adev
)
1483 struct ipu_device
*isp
= adev
->isp
;
1484 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
1485 struct ipu_psys_pg
*kpg
, *kpg0
;
1487 #ifdef CONFIG_DEBUG_FS
1489 debugfs_remove_recursive(psys
->debugfsdir
);
1492 flush_workqueue(IPU_PSYS_WORK_QUEUE
);
1494 if (psys
->sched_cmd_thread
) {
1495 kthread_stop(psys
->sched_cmd_thread
);
1496 psys
->sched_cmd_thread
= NULL
;
1499 pm_runtime_dont_use_autosuspend(&psys
->adev
->dev
);
1501 mutex_lock(&ipu_psys_mutex
);
1503 list_for_each_entry_safe(kpg
, kpg0
, &psys
->pgs
, list
) {
1504 dma_free_attrs(&adev
->dev
, kpg
->size
, kpg
->pg
,
1505 kpg
->pg_dma_addr
, 0);
1509 if (psys
->fwcom
&& ipu_fw_com_release(psys
->fwcom
, 1))
1510 dev_err(&adev
->dev
, "fw com release failed.\n");
1512 kfree(psys
->server_init
);
1513 kfree(psys
->syscom_config
);
1515 ipu_trace_uninit(&adev
->dev
);
1517 ipu_psys_resource_pool_cleanup(&psys
->resource_pool_started
);
1518 ipu_psys_resource_pool_cleanup(&psys
->resource_pool_running
);
1520 device_unregister(&psys
->dev
);
1522 clear_bit(MINOR(psys
->cdev
.dev
), ipu_psys_devices
);
1523 cdev_del(&psys
->cdev
);
1525 mutex_unlock(&ipu_psys_mutex
);
1527 mutex_destroy(&psys
->mutex
);
1529 dev_info(&adev
->dev
, "removed\n");
1532 static irqreturn_t
psys_isr_threaded(struct ipu_bus_device
*adev
)
1534 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
1535 void __iomem
*base
= psys
->pdata
->base
;
1539 mutex_lock(&psys
->mutex
);
1541 r
= pm_runtime_get_if_in_use(&psys
->adev
->dev
);
1542 if (!r
|| WARN_ON_ONCE(r
< 0)) {
1543 mutex_unlock(&psys
->mutex
);
1548 status
= readl(base
+ IPU_REG_PSYS_GPDEV_IRQ_STATUS
);
1549 writel(status
, base
+ IPU_REG_PSYS_GPDEV_IRQ_CLEAR
);
1551 if (status
& IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0
)) {
1552 writel(0, base
+ IPU_REG_PSYS_GPDEV_FWIRQ(0));
1553 ipu_psys_handle_events(psys
);
1556 pm_runtime_mark_last_busy(&psys
->adev
->dev
);
1557 pm_runtime_put_autosuspend(&psys
->adev
->dev
);
1558 mutex_unlock(&psys
->mutex
);
1560 return status
? IRQ_HANDLED
: IRQ_NONE
;
1563 static struct ipu_bus_driver ipu_psys_driver
= {
1564 .probe
= ipu_psys_probe
,
1565 .remove
= ipu_psys_remove
,
1566 .isr_threaded
= psys_isr_threaded
,
1567 .wanted
= IPU_PSYS_NAME
,
1569 .name
= IPU_PSYS_NAME
,
1570 .owner
= THIS_MODULE
,
1572 .probe_type
= PROBE_PREFER_ASYNCHRONOUS
,
1576 static int __init
ipu_psys_init(void)
1578 int rval
= alloc_chrdev_region(&ipu_psys_dev_t
, 0,
1579 IPU_PSYS_NUM_DEVICES
, IPU_PSYS_NAME
);
1581 pr_err("can't alloc psys chrdev region (%d)\n", rval
);
1585 rval
= bus_register(&ipu_psys_bus
);
1587 pr_warn("can't register psys bus (%d)\n", rval
);
1588 goto out_bus_register
;
1591 ipu_bus_register_driver(&ipu_psys_driver
);
1596 unregister_chrdev_region(ipu_psys_dev_t
, IPU_PSYS_NUM_DEVICES
);
1601 static void __exit
ipu_psys_exit(void)
1603 ipu_bus_unregister_driver(&ipu_psys_driver
);
1604 bus_unregister(&ipu_psys_bus
);
1605 unregister_chrdev_region(ipu_psys_dev_t
, IPU_PSYS_NUM_DEVICES
);
1608 static const struct pci_device_id ipu_pci_tbl
[] = {
1609 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, IPU6_PCI_ID
)},
1610 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, IPU6SE_PCI_ID
)},
1611 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, IPU6EP_PCI_ID
)},
1614 MODULE_DEVICE_TABLE(pci
, ipu_pci_tbl
);
1616 module_init(ipu_psys_init
);
1617 module_exit(ipu_psys_exit
);
1619 MODULE_AUTHOR("Antti Laakso <antti.laakso@intel.com>");
1620 MODULE_AUTHOR("Bin Han <bin.b.han@intel.com>");
1621 MODULE_AUTHOR("Renwei Wu <renwei.wu@intel.com>");
1622 MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng@intel.com>");
1623 MODULE_AUTHOR("Xia Wu <xia.wu@intel.com>");
1624 MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
1625 MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang@intel.com>");
1626 MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
1627 MODULE_LICENSE("GPL");
1628 MODULE_DESCRIPTION("Intel ipu processing system driver");