1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2013 - 2020 Intel Corporation
4 #include <linux/debugfs.h>
5 #include <linux/delay.h>
6 #include <linux/device.h>
7 #include <linux/dma-buf.h>
8 #include <linux/firmware.h>
10 #include <linux/highmem.h>
11 #include <linux/init_task.h>
12 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/version.h>
17 #include <linux/poll.h>
18 #include <uapi/linux/sched/types.h>
19 #include <linux/uaccess.h>
20 #include <linux/vmalloc.h>
21 #include <linux/dma-mapping.h>
23 #include <uapi/linux/ipu-psys.h>
28 #include "ipu-platform.h"
29 #include "ipu-buttress.h"
31 #include "ipu-fw-psys.h"
33 #include "ipu-platform-psys.h"
34 #include "ipu-platform-regs.h"
35 #include "ipu-fw-com.h"
37 static bool async_fw_init
;
38 module_param(async_fw_init
, bool, 0664);
39 MODULE_PARM_DESC(async_fw_init
, "Enable asynchronous firmware initialization");
/* Maximum number of psys character devices and autosuspend delay (ms). */
41 #define IPU_PSYS_NUM_DEVICES 4
42 #define IPU_PSYS_AUTOSUSPEND_DELAY 2000
/* Forward declarations for the runtime-PM callbacks defined below. */
45 static int psys_runtime_pm_resume(struct device
*dev
);
46 static int psys_runtime_pm_suspend(struct device
*dev
);
/*
 * NOTE(review): the fused line numbers jump (42->45, 46->48), so the
 * preprocessor conditionals that normally bracket these stubs (an
 * #ifdef CONFIG_PM / #else pair in the upstream driver) appear to be
 * elided from this view — confirm before editing. The stubs below
 * no-op the runtime-PM helpers when runtime PM is compiled out.
 */
48 #define pm_runtime_dont_use_autosuspend(d)
49 #define pm_runtime_use_autosuspend(d)
50 #define pm_runtime_set_autosuspend_delay(d, f) 0
51 #define pm_runtime_get_sync(d) 0
52 #define pm_runtime_put(d) 0
53 #define pm_runtime_put_sync(d) 0
54 #define pm_runtime_put_noidle(d) 0
55 #define pm_runtime_put_autosuspend(d) 0
58 static dev_t ipu_psys_dev_t
;
59 static DECLARE_BITMAP(ipu_psys_devices
, IPU_PSYS_NUM_DEVICES
);
60 static DEFINE_MUTEX(ipu_psys_mutex
);
/*
 * Context for asynchronous firmware init: a delayed work item plus the
 * psys instance it initializes (used when async_fw_init is set).
 * NOTE(review): the struct's closing brace/instance name (original line
 * 65) is elided from this view.
 */
62 static struct fw_init_task
{
63 struct delayed_work work
;
64 struct ipu_psys
*psys
;
/* Forward declaration: remove is also called on async FW-init failure. */
67 static void ipu_psys_remove(struct ipu_bus_device
*adev
);
/* Bus type for the psys character devices registered in probe. */
69 static struct bus_type ipu_psys_bus
= {
70 .name
= IPU_PSYS_NAME
,
/*
 * Return a process-group buffer of at least pg_size bytes.
 * First scans the psys->pgs pool (under pgs_lock) for a free entry
 * (pg_size == 0 marks "free") that is large enough; otherwise allocates
 * a fresh DMA buffer and adds it to the pool. NOTE(review): return
 * statements and error paths are elided from this view.
 */
73 struct ipu_psys_pg
*__get_pg_buf(struct ipu_psys
*psys
, size_t pg_size
)
75 struct ipu_psys_pg
*kpg
;
78 spin_lock_irqsave(&psys
->pgs_lock
, flags
);
79 list_for_each_entry(kpg
, &psys
->pgs
, list
) {
/* A pool entry is reusable if unclaimed and big enough. */
80 if (!kpg
->pg_size
&& kpg
->size
>= pg_size
) {
81 kpg
->pg_size
= pg_size
;
82 spin_unlock_irqrestore(&psys
->pgs_lock
, flags
);
86 spin_unlock_irqrestore(&psys
->pgs_lock
, flags
);
87 /* no big enough buffer available, allocate new one */
88 kpg
= kzalloc(sizeof(*kpg
), GFP_KERNEL
);
92 kpg
->pg
= dma_alloc_attrs(&psys
->adev
->dev
, pg_size
,
93 &kpg
->pg_dma_addr
, GFP_KERNEL
, 0);
99 kpg
->pg_size
= pg_size
;
/* Publish the new buffer into the pool under the lock. */
101 spin_lock_irqsave(&psys
->pgs_lock
, flags
);
102 list_add(&kpg
->list
, &psys
->pgs
);
103 spin_unlock_irqrestore(&psys
->pgs_lock
, flags
);
/* Forward declaration; definition follows later in this file. */
108 static int ipu_psys_unmapbuf_locked(int fd
, struct ipu_psys_fh
*fh
,
109 struct ipu_psys_kbuffer
*kbuf
);
/*
 * Find the kbuffer registered on this file handle for a given fd.
 * Caller must hold fh->mutex (list walk over fh->bufmap).
 * NOTE(review): the match test and return lines are elided here.
 */
110 struct ipu_psys_kbuffer
*ipu_psys_lookup_kbuffer(struct ipu_psys_fh
*fh
, int fd
)
112 struct ipu_psys_kbuffer
*kbuf
;
114 list_for_each_entry(kbuf
, &fh
->bufmap
, list
) {
/*
 * Same lookup keyed by the kernel virtual address obtained from
 * dma_buf_vmap rather than by fd.
 */
122 struct ipu_psys_kbuffer
*
123 ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh
*fh
, void *kaddr
)
125 struct ipu_psys_kbuffer
*kbuffer
;
127 list_for_each_entry(kbuffer
, &fh
->bufmap
, list
) {
128 if (kbuffer
->kaddr
== kaddr
)
/*
 * Pin the userspace pages backing attach->userptr/attach->len and build
 * an sg_table for them. Handles two cases: VM_IO/VM_PFNMAP vmas
 * (translated pfn-by-pfn via follow_pfn) and ordinary anonymous/file
 * memory (get_user_pages). On success attach->pages/npages/sgt are
 * populated. NOTE(review): several declarations, returns and cleanup
 * lines are elided from this view.
 */
135 static int ipu_psys_get_userpages(struct ipu_dma_buf_attach
*attach
)
137 struct vm_area_struct
*vma
;
138 unsigned long start
, end
;
139 int npages
, array_size
;
141 struct sg_table
*sgt
;
/* Page-align the range and compute the page count it spans. */
145 start
= (unsigned long)attach
->userptr
;
146 end
= PAGE_ALIGN(start
+ attach
->len
);
147 npages
= (end
- (start
& PAGE_MASK
)) >> PAGE_SHIFT
;
148 array_size
= npages
* sizeof(struct page
*);
150 sgt
= kzalloc(sizeof(*sgt
), GFP_KERNEL
);
/* Pages already pinned on a prior attach: reuse them. */
154 if (attach
->npages
!= 0) {
155 pages
= attach
->pages
;
156 npages
= attach
->npages
;
157 attach
->vma_is_io
= 1;
161 pages
= kvzalloc(array_size
, GFP_KERNEL
);
165 mmap_read_lock(current
->mm
);
166 vma
= find_vma(current
->mm
, start
);
173 * For buffers from Gralloc, VM_PFNMAP is expected,
174 * but VM_IO is set. Possibly bug in Gralloc.
176 attach
->vma_is_io
= vma
->vm_flags
& (VM_IO
| VM_PFNMAP
);
178 if (attach
->vma_is_io
) {
179 unsigned long io_start
= start
;
/* The whole user range must fit inside this single vma. */
181 if (vma
->vm_end
< start
+ attach
->len
) {
183 "vma at %lu is too small for %llu bytes\n",
/* Resolve each pfn in the IO mapping to a struct page. */
189 for (nr
= 0; nr
< npages
; nr
++, io_start
+= PAGE_SIZE
) {
192 ret
= follow_pfn(vma
, io_start
, &pfn
);
195 pages
[nr
] = pfn_to_page(pfn
);
198 nr
= get_user_pages(start
& PAGE_MASK
, npages
,
204 mmap_read_unlock(current
->mm
);
206 attach
->pages
= pages
;
207 attach
->npages
= npages
;
/* Offset within the first page is the sub-page start of userptr. */
210 ret
= sg_alloc_table_from_pages(sgt
, pages
, npages
,
211 start
& ~PAGE_MASK
, attach
->len
,
/* Error unwind: drop the mmap lock, unpin, free the page array. */
221 mmap_read_unlock(current
->mm
);
223 if (!attach
->vma_is_io
)
225 put_page(pages
[--nr
]);
227 if (array_size
<= PAGE_SIZE
)
234 dev_err(attach
->dev
, "failed to get userpages:%d\n", ret
);
/*
 * Release the pages pinned by ipu_psys_get_userpages: mark them dirty
 * and drop the references (only for non-IO vmas), free the page array
 * and the sg_table. Safe to call with a NULL/empty attach.
 */
239 static void ipu_psys_put_userpages(struct ipu_dma_buf_attach
*attach
)
241 if (!attach
|| !attach
->userptr
|| !attach
->sgt
)
244 if (!attach
->vma_is_io
) {
245 int i
= attach
->npages
;
/* Data may have been written by the device; mark pages dirty. */
248 set_page_dirty_lock(attach
->pages
[i
]);
249 put_page(attach
->pages
[i
]);
253 kvfree(attach
->pages
);
255 sg_free_table(attach
->sgt
);
/*
 * dma_buf_ops.attach: allocate a per-attachment context carrying the
 * exporting kbuffer's userptr/len; pages are pinned lazily in map.
 */
260 static int ipu_dma_buf_attach(struct dma_buf
*dbuf
,
261 struct dma_buf_attachment
*attach
)
263 struct ipu_psys_kbuffer
*kbuf
= dbuf
->priv
;
264 struct ipu_dma_buf_attach
*ipu_attach
;
266 ipu_attach
= kzalloc(sizeof(*ipu_attach
), GFP_KERNEL
);
270 ipu_attach
->len
= kbuf
->len
;
271 ipu_attach
->userptr
= kbuf
->userptr
;
273 attach
->priv
= ipu_attach
;
/* dma_buf_ops.detach: free the per-attachment context. */
277 static void ipu_dma_buf_detach(struct dma_buf
*dbuf
,
278 struct dma_buf_attachment
*attach
)
280 struct ipu_dma_buf_attach
*ipu_attach
= attach
->priv
;
/*
 * dma_buf_ops.map_dma_buf: pin the user pages, DMA-map the resulting
 * sg list and do an initial cache flush so later NO_FLUSH buffers do
 * not carry stale dirty lines. Returns the sg_table or ERR_PTR.
 */
286 static struct sg_table
*ipu_dma_buf_map(struct dma_buf_attachment
*attach
,
287 enum dma_data_direction dir
)
289 struct ipu_dma_buf_attach
*ipu_attach
= attach
->priv
;
293 ret
= ipu_psys_get_userpages(ipu_attach
);
/* Mapping is synced explicitly below; skip the implicit CPU sync. */
297 attrs
= DMA_ATTR_SKIP_CPU_SYNC
;
298 ret
= dma_map_sg_attrs(attach
->dev
, ipu_attach
->sgt
->sgl
,
299 ipu_attach
->sgt
->orig_nents
, dir
, attrs
);
300 if (ret
< ipu_attach
->sgt
->orig_nents
) {
301 ipu_psys_put_userpages(ipu_attach
);
302 dev_dbg(attach
->dev
, "buf map failed\n");
304 return ERR_PTR(-EIO
);
308 * Initial cache flush to avoid writing dirty pages for buffers which
309 * are later marked as IPU_BUFFER_FLAG_NO_FLUSH.
311 dma_sync_sg_for_device(attach
->dev
, ipu_attach
->sgt
->sgl
,
312 ipu_attach
->sgt
->orig_nents
, DMA_BIDIRECTIONAL
);
314 return ipu_attach
->sgt
;
/* dma_buf_ops.unmap_dma_buf: undo the DMA mapping and unpin pages. */
317 static void ipu_dma_buf_unmap(struct dma_buf_attachment
*attach
,
318 struct sg_table
*sg
, enum dma_data_direction dir
)
320 struct ipu_dma_buf_attach
*ipu_attach
= attach
->priv
;
322 dma_unmap_sg(attach
->dev
, sg
->sgl
, sg
->orig_nents
, dir
);
323 ipu_psys_put_userpages(ipu_attach
);
/* dma_buf_ops.mmap: body elided from this view (likely unsupported). */
326 static int ipu_dma_buf_mmap(struct dma_buf
*dbuf
, struct vm_area_struct
*vma
)
/*
 * dma_buf_ops.release: final teardown when the last dmabuf ref drops;
 * releases any pages still pinned through a lingering attachment.
 */
331 static void ipu_dma_buf_release(struct dma_buf
*buf
)
333 struct ipu_psys_kbuffer
*kbuf
= buf
->priv
;
338 if (kbuf
->db_attach
) {
339 dev_dbg(kbuf
->db_attach
->dev
,
340 "releasing buffer %d\n", kbuf
->fd
);
341 ipu_psys_put_userpages(kbuf
->db_attach
->priv
);
/* dma_buf_ops.begin_cpu_access: body elided from this view. */
346 static int ipu_dma_buf_begin_cpu_access(struct dma_buf
*dma_buf
,
347 enum dma_data_direction dir
)
/*
 * dma_buf_ops.vmap: map the pinned pages of the most recent attachment
 * into a contiguous kernel virtual range via vm_map_ram.
 */
352 static void *ipu_dma_buf_vmap(struct dma_buf
*dmabuf
)
354 struct dma_buf_attachment
*attach
;
355 struct ipu_dma_buf_attach
*ipu_attach
;
357 if (list_empty(&dmabuf
->attachments
))
360 attach
= list_last_entry(&dmabuf
->attachments
,
361 struct dma_buf_attachment
, node
);
362 ipu_attach
= attach
->priv
;
364 if (!ipu_attach
|| !ipu_attach
->pages
|| !ipu_attach
->npages
)
367 return vm_map_ram(ipu_attach
->pages
, ipu_attach
->npages
, 0);
/* dma_buf_ops.vunmap: counterpart of vmap; unmap the kernel range. */
370 static void ipu_dma_buf_vunmap(struct dma_buf
*dmabuf
, void *vaddr
)
372 struct dma_buf_attachment
*attach
;
373 struct ipu_dma_buf_attach
*ipu_attach
;
375 if (WARN_ON(list_empty(&dmabuf
->attachments
)))
378 attach
= list_last_entry(&dmabuf
->attachments
,
379 struct dma_buf_attachment
, node
);
380 ipu_attach
= attach
->priv
;
382 if (WARN_ON(!ipu_attach
|| !ipu_attach
->pages
|| !ipu_attach
->npages
))
385 vm_unmap_ram(vaddr
, ipu_attach
->npages
);
/* Exporter ops table for userptr-backed psys buffers. */
388 struct dma_buf_ops ipu_dma_buf_ops
= {
389 .attach
= ipu_dma_buf_attach
,
390 .detach
= ipu_dma_buf_detach
,
391 .map_dma_buf
= ipu_dma_buf_map
,
392 .unmap_dma_buf
= ipu_dma_buf_unmap
,
393 .release
= ipu_dma_buf_release
,
394 .begin_cpu_access
= ipu_dma_buf_begin_cpu_access
,
395 .mmap
= ipu_dma_buf_mmap
,
396 .vmap
= ipu_dma_buf_vmap
,
397 .vunmap
= ipu_dma_buf_vunmap
,
/*
 * file_operations.open: allocate a per-open handle (fh), initialize its
 * buffer list / waitqueue, run ipu_psys_fh_init and link the handle
 * onto psys->fhs. NOTE(review): error unwind lines are elided.
 */
400 static int ipu_psys_open(struct inode
*inode
, struct file
*file
)
402 struct ipu_psys
*psys
= inode_to_ipu_psys(inode
);
403 struct ipu_device
*isp
= psys
->adev
->isp
;
404 struct ipu_psys_fh
*fh
;
410 fh
= kzalloc(sizeof(*fh
), GFP_KERNEL
);
416 file
->private_data
= fh
;
418 mutex_init(&fh
->mutex
);
419 INIT_LIST_HEAD(&fh
->bufmap
);
420 init_waitqueue_head(&fh
->wait
);
422 rval
= ipu_psys_fh_init(fh
);
426 mutex_lock(&psys
->mutex
);
427 list_add_tail(&fh
->list
, &psys
->fhs
);
428 mutex_unlock(&psys
->mutex
);
/* Error path: undo fh mutex init. */
433 mutex_destroy(&fh
->mutex
);
/*
 * Tear down a kbuffer's dmabuf state: vunmap the kernel mapping,
 * unmap the attachment, detach and drop the dmabuf reference.
 */
438 static inline void ipu_psys_kbuf_unmap(struct ipu_psys_kbuffer
*kbuf
)
445 dma_buf_vunmap(kbuf
->dbuf
, kbuf
->kaddr
);
447 dma_buf_unmap_attachment(kbuf
->db_attach
,
451 dma_buf_detach(kbuf
->dbuf
, kbuf
->db_attach
);
452 dma_buf_put(kbuf
->dbuf
);
454 kbuf
->db_attach
= NULL
;
/*
 * file_operations.release: drop every buffer still mapped on this
 * handle, deinit the handle, and clear power gating once the last
 * handle is gone. NOTE(review): lines between the psys->mutex
 * lock/unlock pairs are elided from this view.
 */
459 static int ipu_psys_release(struct inode
*inode
, struct file
*file
)
461 struct ipu_psys
*psys
= inode_to_ipu_psys(inode
);
462 struct ipu_psys_fh
*fh
= file
->private_data
;
463 struct ipu_psys_kbuffer
*kbuf
, *kbuf0
;
464 struct dma_buf_attachment
*db_attach
;
466 mutex_lock(&fh
->mutex
);
467 /* clean up buffers */
468 if (!list_empty(&fh
->bufmap
)) {
469 list_for_each_entry_safe(kbuf
, kbuf0
, &fh
->bufmap
, list
) {
470 list_del(&kbuf
->list
);
471 db_attach
= kbuf
->db_attach
;
473 /* Unmap and release buffers */
474 if (kbuf
->dbuf
&& db_attach
) {
476 ipu_psys_kbuf_unmap(kbuf
);
/* Buffer was never fully mapped; just unpin its pages. */
479 ipu_psys_put_userpages(db_attach
->priv
);
484 mutex_unlock(&fh
->mutex
);
486 mutex_lock(&psys
->mutex
);
489 mutex_unlock(&psys
->mutex
);
490 ipu_psys_fh_deinit(fh
);
492 mutex_lock(&psys
->mutex
);
/* Last open handle gone: drop power gating state. */
493 if (list_empty(&psys
->fhs
))
494 psys
->power_gating
= 0;
495 mutex_unlock(&psys
->mutex
);
496 mutex_destroy(&fh
->mutex
);
/*
 * IPU_IOC_GETBUF: wrap a userspace pointer (buf->base.userptr/len) in a
 * dmabuf exported with ipu_dma_buf_ops, install an fd for it, and
 * register the kbuffer on this handle. Only userptr-backed buffers are
 * supported. NOTE(review): fd error handling and the assignment of
 * buf->base.fd are elided from this view.
 */
502 static int ipu_psys_getbuf(struct ipu_psys_buffer
*buf
, struct ipu_psys_fh
*fh
)
504 struct ipu_psys_kbuffer
*kbuf
;
505 struct ipu_psys
*psys
= fh
->psys
;
507 DEFINE_DMA_BUF_EXPORT_INFO(exp_info
);
508 struct dma_buf
*dbuf
;
511 if (!buf
->base
.userptr
) {
512 dev_err(&psys
->adev
->dev
, "Buffer allocation not supported\n");
516 kbuf
= kzalloc(sizeof(*kbuf
), GFP_KERNEL
);
520 kbuf
->len
= buf
->len
;
521 kbuf
->userptr
= buf
->base
.userptr
;
522 kbuf
->flags
= buf
->flags
;
524 exp_info
.ops
= &ipu_dma_buf_ops
;
525 exp_info
.size
= kbuf
->len
;
526 exp_info
.flags
= O_RDWR
;
527 exp_info
.priv
= kbuf
;
529 dbuf
= dma_buf_export(&exp_info
);
532 return PTR_ERR(dbuf
);
535 ret
= dma_buf_fd(dbuf
, 0);
/* The buffer is now identified by its dmabuf fd, not the userptr. */
544 kbuf
->flags
= buf
->flags
&= ~IPU_BUFFER_FLAG_USERPTR
;
545 kbuf
->flags
= buf
->flags
|= IPU_BUFFER_FLAG_DMA_HANDLE
;
547 mutex_lock(&fh
->mutex
);
548 list_add(&kbuf
->list
, &fh
->bufmap
);
549 mutex_unlock(&fh
->mutex
);
551 dev_dbg(&psys
->adev
->dev
, "IOC_GETBUF: userptr %p size %llu to fd %d",
552 buf
->base
.userptr
, buf
->len
, buf
->base
.fd
);
/* IPU_IOC_PUTBUF: body elided from this view. */
557 static int ipu_psys_putbuf(struct ipu_psys_buffer
*buf
, struct ipu_psys_fh
*fh
)
/*
 * Map a dmabuf fd for device access. Caller holds fh->mutex.
 * Handles three cases: an fd not seen before (allocate a kbuf and
 * track it), a previously mapped fd whose underlying dmabuf changed
 * (unmap and remap), and an already-mapped fd (no-op). On success the
 * kbuf holds the attachment, sg table, dma address and kernel vmap.
 * NOTE(review): several branch/return/error-unwind lines are elided.
 */
562 int ipu_psys_mapbuf_locked(int fd
, struct ipu_psys_fh
*fh
,
563 struct ipu_psys_kbuffer
*kbuf
)
565 struct ipu_psys
*psys
= fh
->psys
;
566 struct dma_buf
*dbuf
;
569 dbuf
= dma_buf_get(fd
);
574 /* This fd isn't generated by ipu_psys_getbuf, it
575 * is a new fd. Create a new kbuf item for this fd, and
576 * add this kbuf to bufmap list.
578 kbuf
= kzalloc(sizeof(*kbuf
), GFP_KERNEL
);
584 list_add(&kbuf
->list
, &fh
->bufmap
);
587 /* fd valid and found, need remap */
588 if (kbuf
->dbuf
&& (kbuf
->dbuf
!= dbuf
|| kbuf
->len
!= dbuf
->size
)) {
589 dev_dbg(&psys
->adev
->dev
,
590 "dmabuf fd %d with kbuf %p changed, need remap.\n",
592 ret
= ipu_psys_unmapbuf_locked(fd
, fh
, kbuf
);
596 kbuf
= ipu_psys_lookup_kbuffer(fh
, fd
);
597 /* changed external dmabuf */
599 kbuf
= kzalloc(sizeof(*kbuf
), GFP_KERNEL
);
604 list_add(&kbuf
->list
, &fh
->bufmap
);
/* Already mapped: nothing further to do for this fd. */
609 dev_dbg(&psys
->adev
->dev
, "fd %d has been mapped!\n", fd
);
617 kbuf
->len
= kbuf
->dbuf
->size
;
621 kbuf
->db_attach
= dma_buf_attach(kbuf
->dbuf
, &psys
->adev
->dev
);
622 if (IS_ERR(kbuf
->db_attach
)) {
623 ret
= PTR_ERR(kbuf
->db_attach
);
624 dev_dbg(&psys
->adev
->dev
, "dma buf attach failed\n");
628 kbuf
->sgt
= dma_buf_map_attachment(kbuf
->db_attach
, DMA_BIDIRECTIONAL
);
629 if (IS_ERR_OR_NULL(kbuf
->sgt
)) {
632 dev_dbg(&psys
->adev
->dev
, "dma buf map attachment failed\n");
636 kbuf
->dma_addr
= sg_dma_address(kbuf
->sgt
->sgl
);
638 kbuf
->kaddr
= dma_buf_vmap(kbuf
->dbuf
);
641 dev_dbg(&psys
->adev
->dev
, "dma buf vmap failed\n");
645 dev_dbg(&psys
->adev
->dev
, "%s kbuf %p fd %d with len %llu mapped\n",
646 __func__
, kbuf
, fd
, kbuf
->len
);
/* Error unwind: tear down partial mapping and drop tracking. */
654 ipu_psys_kbuf_unmap(kbuf
);
656 list_del(&kbuf
->list
);
664 dev_err(&psys
->adev
->dev
, "%s failed for fd %d\n", __func__
, fd
);
/* IPU_IOC_MAPBUF: lock wrapper around ipu_psys_mapbuf_locked. */
668 static long ipu_psys_mapbuf(int fd
, struct ipu_psys_fh
*fh
)
671 struct ipu_psys_kbuffer
*kbuf
;
673 mutex_lock(&fh
->mutex
);
674 kbuf
= ipu_psys_lookup_kbuffer(fh
, fd
);
675 ret
= ipu_psys_mapbuf_locked(fd
, fh
, kbuf
);
676 mutex_unlock(&fh
->mutex
);
678 dev_dbg(&fh
->psys
->adev
->dev
, "IOC_MAPBUF ret %ld\n", ret
);
/*
 * Unmap and forget a kbuffer; caller holds fh->mutex. Rejects a
 * NULL/mismatched kbuf (fd must match the one the kbuf was mapped
 * with).
 */
683 static int ipu_psys_unmapbuf_locked(int fd
, struct ipu_psys_fh
*fh
,
684 struct ipu_psys_kbuffer
*kbuf
)
686 struct ipu_psys
*psys
= fh
->psys
;
688 if (!kbuf
|| fd
!= kbuf
->fd
) {
689 dev_err(&psys
->adev
->dev
, "invalid kbuffer\n");
693 /* From now on it is not safe to use this kbuffer */
694 ipu_psys_kbuf_unmap(kbuf
);
696 list_del(&kbuf
->list
);
701 dev_dbg(&psys
->adev
->dev
, "%s fd %d unmapped\n", __func__
, fd
);
/* IPU_IOC_UNMAPBUF: lock wrapper; looks up the kbuf first. */
706 static long ipu_psys_unmapbuf(int fd
, struct ipu_psys_fh
*fh
)
708 struct ipu_psys_kbuffer
*kbuf
;
711 mutex_lock(&fh
->mutex
);
712 kbuf
= ipu_psys_lookup_kbuffer(fh
, fd
);
714 dev_err(&fh
->psys
->adev
->dev
,
715 "buffer with fd %d not found\n", fd
);
716 mutex_unlock(&fh
->mutex
);
719 ret
= ipu_psys_unmapbuf_locked(fd
, fh
, kbuf
);
720 mutex_unlock(&fh
->mutex
);
722 dev_dbg(&fh
->psys
->adev
->dev
, "IOC_UNMAPBUF\n");
/*
 * file_operations.poll: readable when a completed kernel command is
 * queued for this handle. NOTE(review): the res assignment (likely
 * POLLIN) is elided from this view.
 */
727 static unsigned int ipu_psys_poll(struct file
*file
,
728 struct poll_table_struct
*wait
)
730 struct ipu_psys_fh
*fh
= file
->private_data
;
731 struct ipu_psys
*psys
= fh
->psys
;
732 unsigned int res
= 0;
734 dev_dbg(&psys
->adev
->dev
, "ipu psys poll\n");
736 poll_wait(file
, &fh
->wait
, wait
);
738 if (ipu_get_completed_kcmd(fh
))
741 dev_dbg(&psys
->adev
->dev
, "ipu psys poll res %u\n", res
);
/*
 * IPU_IOC_GET_MANIFEST: copy a client program-group manifest out of the
 * CPD firmware image to userspace. The pkg-dir stores device addresses;
 * subtracting the FW DMA base converts the entry address into an offset
 * into the CPU-visible firmware copy.
 */
746 static long ipu_get_manifest(struct ipu_psys_manifest
*manifest
,
747 struct ipu_psys_fh
*fh
)
749 struct ipu_psys
*psys
= fh
->psys
;
750 struct ipu_device
*isp
= psys
->adev
->isp
;
751 struct ipu_cpd_client_pkg_hdr
*client_pkg
;
754 dma_addr_t dma_fw_data
;
755 u32 client_pkg_offset
;
757 host_fw_data
= (void *)isp
->cpd_fw
->data
;
758 dma_fw_data
= sg_dma_address(psys
->fw_sgt
.sgl
);
/* Validate the requested index against the pkg-dir entry count. */
760 entries
= ipu_cpd_pkg_dir_get_num_entries(psys
->pkg_dir
);
761 if (!manifest
|| manifest
->index
> entries
- 1) {
762 dev_err(&psys
->adev
->dev
, "invalid argument\n");
766 if (!ipu_cpd_pkg_dir_get_size(psys
->pkg_dir
, manifest
->index
) ||
767 ipu_cpd_pkg_dir_get_type(psys
->pkg_dir
, manifest
->index
) <
768 IPU_CPD_PKG_DIR_CLIENT_PG_TYPE
) {
769 dev_dbg(&psys
->adev
->dev
, "invalid pkg dir entry\n");
773 client_pkg_offset
= ipu_cpd_pkg_dir_get_address(psys
->pkg_dir
,
/* Device address -> offset into the host copy of the firmware. */
775 client_pkg_offset
-= dma_fw_data
;
777 client_pkg
= host_fw_data
+ client_pkg_offset
;
778 manifest
->size
= client_pkg
->pg_manifest_size
;
/* Size-only query: user passed no destination buffer. */
780 if (!manifest
->manifest
)
783 if (copy_to_user(manifest
->manifest
,
784 (uint8_t *)client_pkg
+ client_pkg
->pg_manifest_offs
,
/*
 * Main unlocked_ioctl dispatcher. Argument structs are staged in a
 * union-like karg: copied in for _IOC_WRITE commands, dispatched by
 * cmd, and copied back for _IOC_READ commands. MAPBUF/UNMAPBUF pass
 * the fd directly in arg, so no copy is done for them.
 * NOTE(review): the karg union declaration, switch header, break
 * statements and default case are elided from this view.
 */
792 static long ipu_psys_ioctl(struct file
*file
, unsigned int cmd
,
796 struct ipu_psys_buffer buf
;
797 struct ipu_psys_command cmd
;
798 struct ipu_psys_event ev
;
799 struct ipu_psys_capability caps
;
800 struct ipu_psys_manifest m
;
802 struct ipu_psys_fh
*fh
= file
->private_data
;
804 void __user
*up
= (void __user
*)arg
;
805 bool copy
= (cmd
!= IPU_IOC_MAPBUF
&& cmd
!= IPU_IOC_UNMAPBUF
);
808 if (_IOC_SIZE(cmd
) > sizeof(karg
))
811 if (_IOC_DIR(cmd
) & _IOC_WRITE
) {
812 err
= copy_from_user(&karg
, up
, _IOC_SIZE(cmd
));
820 err
= ipu_psys_mapbuf(arg
, fh
);
822 case IPU_IOC_UNMAPBUF
:
823 err
= ipu_psys_unmapbuf(arg
, fh
);
825 case IPU_IOC_QUERYCAP
:
826 karg
.caps
= fh
->psys
->caps
;
829 err
= ipu_psys_getbuf(&karg
.buf
, fh
);
832 err
= ipu_psys_putbuf(&karg
.buf
, fh
);
835 err
= ipu_psys_kcmd_new(&karg
.cmd
, fh
);
837 case IPU_IOC_DQEVENT
:
838 err
= ipu_ioctl_dqevent(&karg
.ev
, fh
, file
->f_flags
);
840 case IPU_IOC_GET_MANIFEST
:
841 err
= ipu_get_manifest(&karg
.m
, fh
);
/* Copy results back for read-direction ioctls. */
851 if (copy
&& _IOC_DIR(cmd
) & _IOC_READ
)
852 if (copy_to_user(up
, &karg
, _IOC_SIZE(cmd
)))
/* Character-device entry points for /dev/ipu-psysN. */
858 static const struct file_operations ipu_psys_fops
= {
859 .open
= ipu_psys_open
,
860 .release
= ipu_psys_release
,
861 .unlocked_ioctl
= ipu_psys_ioctl
,
863 .compat_ioctl
= ipu_psys_compat_ioctl32
,
865 .poll
= ipu_psys_poll
,
866 .owner
= THIS_MODULE
,
/* Release hook for the psys struct device (body elided/empty). */
869 static void ipu_psys_dev_release(struct device
*dev
)
/*
 * Runtime-PM resume: skip if already powered (ready flag under
 * ready_lock), bring up the MMU, then — if the FW is initialized and
 * authenticated — program the HW, power subdomains, configure the SPC
 * and open the FW ABI. NOTE(review): the ready-flag reads/writes and
 * several early-return lines are elided from this view.
 */
874 static int psys_runtime_pm_resume(struct device
*dev
)
876 struct ipu_bus_device
*adev
= to_ipu_bus_device(dev
);
877 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
885 * In runtime autosuspend mode, if the psys is in power on state, no
886 * need to resume again.
888 spin_lock_irqsave(&psys
->ready_lock
, flags
);
890 spin_unlock_irqrestore(&psys
->ready_lock
, flags
);
893 spin_unlock_irqrestore(&psys
->ready_lock
, flags
);
895 retval
= ipu_mmu_hw_init(adev
->mmu
);
/* Async FW init still pending: resume without touching the FW. */
899 if (async_fw_init
&& !psys
->fwcom
) {
901 "%s: asynchronous firmware init not finished, skipping\n",
906 if (!ipu_buttress_auth_done(adev
->isp
)) {
907 dev_dbg(dev
, "%s: not yet authenticated, skipping\n", __func__
);
911 ipu_psys_setup_hw(psys
);
913 ipu_psys_subdomains_power(psys
, 1);
914 ipu_trace_restore(&psys
->adev
->dev
);
916 ipu_configure_spc(adev
->isp
,
917 &psys
->pdata
->ipdata
->hw_variant
,
918 IPU_CPD_PKG_DIR_PSYS_SERVER_IDX
,
919 psys
->pdata
->base
, psys
->pkg_dir
,
920 psys
->pkg_dir_dma_addr
);
922 retval
= ipu_fw_psys_open(psys
);
924 dev_err(&psys
->adev
->dev
, "Failed to open abi.\n");
/* Mark the device ready under the lock. */
928 spin_lock_irqsave(&psys
->ready_lock
, flags
);
930 spin_unlock_irqrestore(&psys
->ready_lock
, flags
);
/*
 * Runtime-PM suspend: clear the ready flag, close the FW ABI (failures
 * are logged but not propagated — we are powering down anyway), drop
 * subdomain power and clean up the MMU.
 */
935 static int psys_runtime_pm_suspend(struct device
*dev
)
937 struct ipu_bus_device
*adev
= to_ipu_bus_device(dev
);
938 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
948 spin_lock_irqsave(&psys
->ready_lock
, flags
);
950 spin_unlock_irqrestore(&psys
->ready_lock
, flags
);
953 * We can trace failure but better to not return an error.
954 * At suspend we are progressing towards psys power gated state.
955 * Any hang / failure inside psys will be forgotten soon.
957 rval
= ipu_fw_psys_close(psys
);
959 dev_err(dev
, "Device close failure: %d\n", rval
);
961 ipu_psys_subdomains_power(psys
, 0);
963 ipu_mmu_hw_cleanup(adev
->mmu
);
968 /* The following PM callbacks are needed to enable runtime PM in IPU PCI
969 * device resume, otherwise, runtime PM can't work in PCI resume from
/* System sleep hooks (bodies elided; likely trivial). */
972 static int psys_resume(struct device
*dev
)
977 static int psys_suspend(struct device
*dev
)
982 static const struct dev_pm_ops psys_pm_ops
= {
983 .runtime_suspend
= psys_runtime_pm_suspend
,
984 .runtime_resume
= psys_runtime_pm_resume
,
985 .suspend
= psys_suspend
,
986 .resume
= psys_resume
,
/* NOTE(review): #ifdef CONFIG_PM guard lines appear elided here. */
989 #define PSYS_PM_OPS (&psys_pm_ops)
991 #define PSYS_PM_OPS NULL
/*
 * Reload the CPD firmware (secure mode only): free the old pkg-dir and
 * FW mapping, request and validate the new CPD image, map it for DMA,
 * rebuild the pkg-dir, mirror the handles into the isp device and
 * re-authenticate. goto-based unwind releases resources in reverse
 * order on failure.
 */
994 static int cpd_fw_reload(struct ipu_device
*isp
)
996 struct ipu_psys
*psys
= ipu_bus_get_drvdata(isp
->psys
);
999 if (!isp
->secure_mode
) {
1000 dev_warn(&isp
->pdev
->dev
,
1001 "CPD firmware reload was only supported for secure mode.\n");
/* Drop the previous firmware image and its DMA mapping. */
1006 ipu_cpd_free_pkg_dir(isp
->psys
, psys
->pkg_dir
,
1007 psys
->pkg_dir_dma_addr
,
1008 psys
->pkg_dir_size
);
1010 ipu_buttress_unmap_fw_image(isp
->psys
, &psys
->fw_sgt
);
1011 release_firmware(isp
->cpd_fw
);
1013 dev_info(&isp
->pdev
->dev
, "Old FW removed\n");
1016 rval
= request_cpd_fw(&isp
->cpd_fw
, isp
->cpd_fw_name
,
1019 dev_err(&isp
->pdev
->dev
, "Requesting firmware(%s) failed\n",
1024 rval
= ipu_cpd_validate_cpd_file(isp
, isp
->cpd_fw
->data
,
1027 dev_err(&isp
->pdev
->dev
, "Failed to validate cpd file\n");
1028 goto out_release_firmware
;
1031 rval
= ipu_buttress_map_fw_image(isp
->psys
, isp
->cpd_fw
, &psys
->fw_sgt
);
1033 goto out_release_firmware
;
1035 psys
->pkg_dir
= ipu_cpd_create_pkg_dir(isp
->psys
,
1037 sg_dma_address(psys
->fw_sgt
.sgl
),
1038 &psys
->pkg_dir_dma_addr
,
1039 &psys
->pkg_dir_size
);
1041 if (!psys
->pkg_dir
) {
1043 goto out_unmap_fw_image
;
/* Keep the isp-level copies in sync with the psys copies. */
1046 isp
->pkg_dir
= psys
->pkg_dir
;
1047 isp
->pkg_dir_dma_addr
= psys
->pkg_dir_dma_addr
;
1048 isp
->pkg_dir_size
= psys
->pkg_dir_size
;
1050 if (!isp
->secure_mode
)
1053 rval
= ipu_fw_authenticate(isp
, 1);
1055 goto out_free_pkg_dir
;
/* Error unwind. */
1060 ipu_cpd_free_pkg_dir(isp
->psys
, psys
->pkg_dir
,
1061 psys
->pkg_dir_dma_addr
, psys
->pkg_dir_size
);
1063 ipu_buttress_unmap_fw_image(isp
->psys
, &psys
->fw_sgt
);
1064 out_release_firmware
:
1065 release_firmware(isp
->cpd_fw
);
1071 #ifdef CONFIG_DEBUG_FS
/* debugfs get/set for the SP icache prefetch flag. */
1072 static int ipu_psys_icache_prefetch_sp_get(void *data
, u64
*val
)
1074 struct ipu_psys
*psys
= data
;
1076 *val
= psys
->icache_prefetch_sp
;
1080 static int ipu_psys_icache_prefetch_sp_set(void *data
, u64 val
)
1082 struct ipu_psys
*psys
= data
;
1087 psys
->icache_prefetch_sp
= val
;
1092 DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_sp_fops
,
1093 ipu_psys_icache_prefetch_sp_get
,
1094 ipu_psys_icache_prefetch_sp_set
, "%llu\n");
/* debugfs get/set for the ISP icache prefetch flag. */
1096 static int ipu_psys_icache_prefetch_isp_get(void *data
, u64
*val
)
1098 struct ipu_psys
*psys
= data
;
1100 *val
= psys
->icache_prefetch_isp
;
1104 static int ipu_psys_icache_prefetch_isp_set(void *data
, u64 val
)
1106 struct ipu_psys
*psys
= data
;
1111 psys
->icache_prefetch_isp
= val
;
1116 DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_isp_fops
,
1117 ipu_psys_icache_prefetch_isp_get
,
1118 ipu_psys_icache_prefetch_isp_set
, "%llu\n");
/*
 * Create the psys debugfs directory and its prefetch control files;
 * removes the directory again on failure.
 */
1120 static int ipu_psys_init_debugfs(struct ipu_psys
*psys
)
1122 struct dentry
*file
;
1125 dir
= debugfs_create_dir("psys", psys
->adev
->isp
->ipu_dir
);
1129 file
= debugfs_create_file("icache_prefetch_sp", 0600,
1130 dir
, psys
, &psys_icache_prefetch_sp_fops
);
1134 file
= debugfs_create_file("icache_prefetch_isp", 0600,
1135 dir
, psys
, &psys_icache_prefetch_isp_fops
);
1139 psys
->debugfsdir
= dir
;
1142 if (ipu_psys_gpc_init_debugfs(psys
))
/* Error path: tear the directory down. */
1148 debugfs_remove_recursive(dir
);
/*
 * Command-scheduler kthread: sleeps until woken (wakeup_count) or asked
 * to stop, then runs queued commands under psys->mutex. Decouples FW
 * command scheduling from the user-facing queueing paths.
 */
1153 static int ipu_psys_sched_cmd(void *ptr
)
1155 struct ipu_psys
*psys
= ptr
;
1159 wait_event_interruptible(psys
->sched_cmd_wq
,
1160 (kthread_should_stop() ||
1162 atomic_read(&psys
->wakeup_count
))));
1164 if (kthread_should_stop())
1170 mutex_lock(&psys
->mutex
);
/* Consume the wakeup before scheduling to avoid losing new wakeups. */
1171 atomic_set(&psys
->wakeup_count
, 0);
1172 ipu_psys_run_next(psys
);
1173 mutex_unlock(&psys
->mutex
);
/*
 * fw-com cell_start callback: kick the SP (server processor) by
 * writing START|RUN|ICACHE_INVALIDATE (plus optional icache prefetch)
 * into its status/control register.
 */
1179 static void start_sp(struct ipu_bus_device
*adev
)
1181 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
1182 void __iomem
*spc_regs_base
= psys
->pdata
->base
+
1183 psys
->pdata
->ipdata
->hw_variant
.spc_offset
;
1186 val
|= IPU_PSYS_SPC_STATUS_START
|
1187 IPU_PSYS_SPC_STATUS_RUN
|
1188 IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE
;
1189 val
|= psys
->icache_prefetch_sp
?
1190 IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH
: 0;
1191 writel(val
, spc_regs_base
+ IPU_PSYS_REG_SPC_STATUS_CTRL
);
/*
 * fw-com cell_ready callback: SP is ready when READY is set and START
 * has been cleared by the firmware.
 */
1194 static int query_sp(struct ipu_bus_device
*adev
)
1196 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
1197 void __iomem
*spc_regs_base
= psys
->pdata
->base
+
1198 psys
->pdata
->ipdata
->hw_variant
.spc_offset
;
1199 u32 val
= readl(spc_regs_base
+ IPU_PSYS_REG_SPC_STATUS_CTRL
);
1201 /* return true when READY == 1, START == 0 */
1202 val
&= IPU_PSYS_SPC_STATUS_READY
| IPU_PSYS_SPC_STATUS_START
;
1204 return val
== IPU_PSYS_SPC_STATUS_READY
;
/*
 * Prepare the FW communication layer: build the event (output) and
 * command (input) queue configs — the command queue count depends on
 * the IPU generation — plus the server-init blob, then hand everything
 * to ipu_fw_com_prepare. NOTE(review): several initializer and return
 * lines are elided from this view.
 */
1207 static int ipu_psys_fw_init(struct ipu_psys
*psys
)
1210 struct ipu_fw_syscom_queue_config
*queue_cfg
;
1211 struct ipu_fw_syscom_queue_config fw_psys_event_queue_cfg
[] = {
1213 IPU_FW_PSYS_EVENT_QUEUE_SIZE
,
1214 sizeof(struct ipu_fw_psys_event
)
1217 struct ipu_fw_psys_srv_init server_init
= {
1218 .ddr_pkg_dir_address
= 0,
1219 .host_ddr_pkg_dir
= NULL
,
1221 .icache_prefetch_sp
= psys
->icache_prefetch_sp
,
1222 .icache_prefetch_isp
= psys
->icache_prefetch_isp
,
1224 struct ipu_fw_com_cfg fwcom
= {
1225 .num_output_queues
= IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID
,
1226 .output
= fw_psys_event_queue_cfg
,
1227 .specific_addr
= &server_init
,
1228 .specific_size
= sizeof(server_init
),
1229 .cell_start
= start_sp
,
1230 .cell_ready
= query_sp
,
1231 .buttress_boot_offset
= SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET
,
/* Command queue count: IPU6/6EP have more queues than IPU6SE. */
1235 size
= IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID
;
1236 if (ipu_ver
== IPU_VER_6
|| ipu_ver
== IPU_VER_6EP
)
1237 size
= IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID
;
1239 queue_cfg
= devm_kzalloc(&psys
->adev
->dev
, sizeof(*queue_cfg
) * size
,
1244 for (i
= 0; i
< size
; i
++) {
1245 queue_cfg
[i
].queue_size
= IPU_FW_PSYS_CMD_QUEUE_SIZE
;
1246 queue_cfg
[i
].token_size
= sizeof(struct ipu_fw_psys_cmd
);
1249 fwcom
.input
= queue_cfg
;
1250 fwcom
.num_input_queues
= size
;
1251 fwcom
.dmem_addr
= psys
->pdata
->ipdata
->hw_variant
.dmem_offset
;
1253 psys
->fwcom
= ipu_fw_com_prepare(&fwcom
, psys
->adev
, psys
->pdata
->base
);
1255 dev_err(&psys
->adev
->dev
, "psys fw com prepare failed\n");
/*
 * Delayed-work body for async firmware init; tears the device down
 * via ipu_psys_remove if the init fails.
 */
1262 static void run_fw_init_work(struct work_struct
*work
)
1264 struct fw_init_task
*task
= (struct fw_init_task
*)work
;
1265 struct ipu_psys
*psys
= task
->psys
;
1268 rval
= ipu_psys_fw_init(psys
);
1271 dev_err(&psys
->adev
->dev
, "FW init failed(%d)\n", rval
);
1272 ipu_psys_remove(psys
->adev
);
1274 dev_info(&psys
->adev
->dev
, "FW init done\n");
/*
 * Bus probe: allocate a minor and the psys instance, register the
 * character device, create the scheduler kthread and resource pools,
 * preallocate the process-group DMA buffer pool, run (or schedule) FW
 * init, register the struct device and enable runtime autosuspend.
 * Unwinds in reverse order via the labelled error paths at the bottom.
 * NOTE(review): numerous lines (labels, returns, some arguments) are
 * elided from this view.
 */
1278 static int ipu_psys_probe(struct ipu_bus_device
*adev
)
1280 struct ipu_device
*isp
= adev
->isp
;
1281 struct ipu_psys_pg
*kpg
, *kpg0
;
1282 struct ipu_psys
*psys
;
1284 int i
, rval
= -E2BIG
;
1286 rval
= ipu_mmu_hw_init(adev
->mmu
);
1290 mutex_lock(&ipu_psys_mutex
);
/* Claim a free chardev minor. */
1292 minor
= find_next_zero_bit(ipu_psys_devices
, IPU_PSYS_NUM_DEVICES
, 0);
1293 if (minor
== IPU_PSYS_NUM_DEVICES
) {
1294 dev_err(&adev
->dev
, "too many devices\n");
1298 psys
= devm_kzalloc(&adev
->dev
, sizeof(*psys
), GFP_KERNEL
);
1305 psys
->pdata
= adev
->pdata
;
1306 psys
->icache_prefetch_sp
= 0;
1308 psys
->power_gating
= 0;
1310 ipu_trace_init(adev
->isp
, psys
->pdata
->base
, &adev
->dev
,
1313 cdev_init(&psys
->cdev
, &ipu_psys_fops
);
1314 psys
->cdev
.owner
= ipu_psys_fops
.owner
;
1316 rval
= cdev_add(&psys
->cdev
, MKDEV(MAJOR(ipu_psys_dev_t
), minor
), 1);
1318 dev_err(&adev
->dev
, "cdev_add failed (%d)\n", rval
);
1322 set_bit(minor
, ipu_psys_devices
);
1324 spin_lock_init(&psys
->ready_lock
);
1325 spin_lock_init(&psys
->pgs_lock
);
1327 psys
->timeout
= IPU_PSYS_CMD_TIMEOUT_MS
;
1329 mutex_init(&psys
->mutex
);
1330 INIT_LIST_HEAD(&psys
->fhs
);
1331 INIT_LIST_HEAD(&psys
->pgs
);
1332 INIT_LIST_HEAD(&psys
->started_kcmds_list
);
1333 INIT_WORK(&psys
->watchdog_work
, ipu_psys_watchdog_work
);
1335 init_waitqueue_head(&psys
->sched_cmd_wq
);
1336 atomic_set(&psys
->wakeup_count
, 0);
1338 * Create a thread to schedule commands sent to IPU firmware.
1339 * The thread reduces the coupling between the command scheduler
1340 * and queueing commands from the user to driver.
1342 psys
->sched_cmd_thread
= kthread_run(ipu_psys_sched_cmd
, psys
,
1345 if (IS_ERR(psys
->sched_cmd_thread
)) {
1346 psys
->sched_cmd_thread
= NULL
;
1347 mutex_destroy(&psys
->mutex
);
1351 ipu_bus_set_drvdata(adev
, psys
);
1353 rval
= ipu_psys_resource_pool_init(&psys
->resource_pool_started
);
1356 "unable to alloc process group resources\n");
1357 goto out_mutex_destroy
;
1360 rval
= ipu_psys_resource_pool_init(&psys
->resource_pool_running
);
1363 "unable to alloc process group resources\n");
1364 goto out_resources_started_free
;
/* Mirror FW/pkg-dir handles prepared by the isys/buttress code. */
1367 ipu6_psys_hw_res_variant_init();
1368 psys
->pkg_dir
= isp
->pkg_dir
;
1369 psys
->pkg_dir_dma_addr
= isp
->pkg_dir_dma_addr
;
1370 psys
->pkg_dir_size
= isp
->pkg_dir_size
;
1371 psys
->fw_sgt
= isp
->fw_sgt
;
1373 /* allocate and map memory for process groups */
1374 for (i
= 0; i
< IPU_PSYS_PG_POOL_SIZE
; i
++) {
1375 kpg
= kzalloc(sizeof(*kpg
), GFP_KERNEL
);
1378 kpg
->pg
= dma_alloc_attrs(&adev
->dev
,
1379 IPU_PSYS_PG_MAX_SIZE
,
1386 kpg
->size
= IPU_PSYS_PG_MAX_SIZE
;
1387 list_add(&kpg
->list
, &psys
->pgs
);
1390 psys
->caps
.pg_count
= ipu_cpd_pkg_dir_get_num_entries(psys
->pkg_dir
);
1392 dev_info(&adev
->dev
, "pkg_dir entry count:%d\n", psys
->caps
.pg_count
);
/* FW init: deferred to a work item when async_fw_init is set. */
1393 if (async_fw_init
) {
1394 INIT_DELAYED_WORK((struct delayed_work
*)&fw_init_task
,
1396 fw_init_task
.psys
= psys
;
1397 schedule_delayed_work((struct delayed_work
*)&fw_init_task
, 0);
1399 rval
= ipu_psys_fw_init(psys
);
1403 dev_err(&adev
->dev
, "FW init failed(%d)\n", rval
);
1407 psys
->dev
.parent
= &adev
->dev
;
1408 psys
->dev
.bus
= &ipu_psys_bus
;
1409 psys
->dev
.devt
= MKDEV(MAJOR(ipu_psys_dev_t
), minor
);
1410 psys
->dev
.release
= ipu_psys_dev_release
;
1411 dev_set_name(&psys
->dev
, "ipu-psys%d", minor
);
1412 rval
= device_register(&psys
->dev
);
1414 dev_err(&psys
->dev
, "psys device_register failed\n");
1415 goto out_release_fw_com
;
1418 /* Add the hw stepping information to caps */
1419 strlcpy(psys
->caps
.dev_model
, IPU_MEDIA_DEV_MODEL_NAME
,
1420 sizeof(psys
->caps
.dev_model
));
1422 pm_runtime_set_autosuspend_delay(&psys
->adev
->dev
,
1423 IPU_PSYS_AUTOSUSPEND_DELAY
);
1424 pm_runtime_use_autosuspend(&psys
->adev
->dev
);
1425 pm_runtime_mark_last_busy(&psys
->adev
->dev
);
1427 mutex_unlock(&ipu_psys_mutex
);
1429 #ifdef CONFIG_DEBUG_FS
1430 /* Debug fs failure is not fatal. */
1431 ipu_psys_init_debugfs(psys
);
1434 adev
->isp
->cpd_fw_reload
= &cpd_fw_reload
;
1436 dev_info(&adev
->dev
, "psys probe minor: %d\n", minor
);
1438 ipu_mmu_hw_cleanup(adev
->mmu
);
/* Error unwind paths (labels elided from this view). */
1443 ipu_fw_com_release(psys
->fwcom
, 1);
1445 list_for_each_entry_safe(kpg
, kpg0
, &psys
->pgs
, list
) {
1446 dma_free_attrs(&adev
->dev
, kpg
->size
, kpg
->pg
,
1447 kpg
->pg_dma_addr
, 0);
1451 ipu_psys_resource_pool_cleanup(&psys
->resource_pool_running
);
1452 out_resources_started_free
:
1453 ipu_psys_resource_pool_cleanup(&psys
->resource_pool_started
);
1455 mutex_destroy(&psys
->mutex
);
1456 cdev_del(&psys
->cdev
);
1457 if (psys
->sched_cmd_thread
) {
1458 kthread_stop(psys
->sched_cmd_thread
);
1459 psys
->sched_cmd_thread
= NULL
;
1462 /* Safe to call even if the init is not called */
1463 ipu_trace_uninit(&adev
->dev
);
1464 mutex_unlock(&ipu_psys_mutex
);
1466 ipu_mmu_hw_cleanup(adev
->mmu
);
/*
 * Bus remove: mirror of probe — tear down debugfs, stop the scheduler
 * thread, free the PG buffer pool, release fw-com, unregister the
 * device and return the chardev minor.
 */
1471 static void ipu_psys_remove(struct ipu_bus_device
*adev
)
1473 struct ipu_device
*isp
= adev
->isp
;
1474 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
1475 struct ipu_psys_pg
*kpg
, *kpg0
;
1477 #ifdef CONFIG_DEBUG_FS
1479 debugfs_remove_recursive(psys
->debugfsdir
);
/* Let any queued watchdog/handler work finish before teardown. */
1482 flush_workqueue(IPU_PSYS_WORK_QUEUE
);
1484 if (psys
->sched_cmd_thread
) {
1485 kthread_stop(psys
->sched_cmd_thread
);
1486 psys
->sched_cmd_thread
= NULL
;
1489 pm_runtime_dont_use_autosuspend(&psys
->adev
->dev
);
1491 mutex_lock(&ipu_psys_mutex
);
1493 list_for_each_entry_safe(kpg
, kpg0
, &psys
->pgs
, list
) {
1494 dma_free_attrs(&adev
->dev
, kpg
->size
, kpg
->pg
,
1495 kpg
->pg_dma_addr
, 0);
1499 if (psys
->fwcom
&& ipu_fw_com_release(psys
->fwcom
, 1))
1500 dev_err(&adev
->dev
, "fw com release failed.\n");
1502 kfree(psys
->server_init
);
1503 kfree(psys
->syscom_config
);
1505 ipu_trace_uninit(&adev
->dev
);
1507 ipu_psys_resource_pool_cleanup(&psys
->resource_pool_started
);
1508 ipu_psys_resource_pool_cleanup(&psys
->resource_pool_running
);
1510 device_unregister(&psys
->dev
);
/* Return the minor to the shared bitmap. */
1512 clear_bit(MINOR(psys
->cdev
.dev
), ipu_psys_devices
);
1513 cdev_del(&psys
->cdev
);
1515 mutex_unlock(&ipu_psys_mutex
);
1517 mutex_destroy(&psys
->mutex
);
1519 dev_info(&adev
->dev
, "removed\n");
/*
 * Threaded ISR: bail out (IRQ_NONE-ish) if the device is not runtime
 * active, then read-and-clear the GPDEV IRQ status and dispatch FW
 * event processing when the FW IRQ0 bit is set. pm_runtime_get_if_in_use
 * avoids powering the device up just to service a stale interrupt.
 */
1522 static irqreturn_t
psys_isr_threaded(struct ipu_bus_device
*adev
)
1524 struct ipu_psys
*psys
= ipu_bus_get_drvdata(adev
);
1525 void __iomem
*base
= psys
->pdata
->base
;
1529 mutex_lock(&psys
->mutex
);
1531 r
= pm_runtime_get_if_in_use(&psys
->adev
->dev
);
1532 if (!r
|| WARN_ON_ONCE(r
< 0)) {
1533 mutex_unlock(&psys
->mutex
);
/* Read-and-acknowledge the pending IRQ bits. */
1538 status
= readl(base
+ IPU_REG_PSYS_GPDEV_IRQ_STATUS
);
1539 writel(status
, base
+ IPU_REG_PSYS_GPDEV_IRQ_CLEAR
);
1541 if (status
& IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0
)) {
1542 writel(0, base
+ IPU_REG_PSYS_GPDEV_FWIRQ(0));
1543 ipu_psys_handle_events(psys
);
1546 pm_runtime_mark_last_busy(&psys
->adev
->dev
);
1547 pm_runtime_put_autosuspend(&psys
->adev
->dev
);
1548 mutex_unlock(&psys
->mutex
);
1550 return status
? IRQ_HANDLED
: IRQ_NONE
;
/* IPU bus driver descriptor binding this driver to the psys device. */
1553 static struct ipu_bus_driver ipu_psys_driver
= {
1554 .probe
= ipu_psys_probe
,
1555 .remove
= ipu_psys_remove
,
1556 .isr_threaded
= psys_isr_threaded
,
1557 .wanted
= IPU_PSYS_NAME
,
1559 .name
= IPU_PSYS_NAME
,
1560 .owner
= THIS_MODULE
,
1562 .probe_type
= PROBE_PREFER_ASYNCHRONOUS
,
/*
 * Module init: allocate the chardev region, register the psys bus and
 * the bus driver; unwinds the chrdev region on bus registration
 * failure.
 */
1566 static int __init
ipu_psys_init(void)
1568 int rval
= alloc_chrdev_region(&ipu_psys_dev_t
, 0,
1569 IPU_PSYS_NUM_DEVICES
, IPU_PSYS_NAME
);
1571 pr_err("can't alloc psys chrdev region (%d)\n", rval
);
1575 rval
= bus_register(&ipu_psys_bus
);
1577 pr_warn("can't register psys bus (%d)\n", rval
);
1578 goto out_bus_register
;
1581 ipu_bus_register_driver(&ipu_psys_driver
);
1586 unregister_chrdev_region(ipu_psys_dev_t
, IPU_PSYS_NUM_DEVICES
);
/* Module exit: reverse of init. */
1591 static void __exit
ipu_psys_exit(void)
1593 ipu_bus_unregister_driver(&ipu_psys_driver
);
1594 bus_unregister(&ipu_psys_bus
);
1595 unregister_chrdev_region(ipu_psys_dev_t
, IPU_PSYS_NUM_DEVICES
);
/* PCI IDs served by the parent IPU PCI driver (for module aliasing). */
1598 static const struct pci_device_id ipu_pci_tbl
[] = {
1599 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, IPU6_PCI_ID
)},
1600 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, IPU6SE_PCI_ID
)},
1601 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, IPU6EP_PCI_ID
)},
1604 MODULE_DEVICE_TABLE(pci
, ipu_pci_tbl
);
1606 module_init(ipu_psys_init
);
1607 module_exit(ipu_psys_exit
);
1609 MODULE_AUTHOR("Antti Laakso <antti.laakso@intel.com>");
1610 MODULE_AUTHOR("Bin Han <bin.b.han@intel.com>");
1611 MODULE_AUTHOR("Renwei Wu <renwei.wu@intel.com>");
1612 MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng@intel.com>");
1613 MODULE_AUTHOR("Xia Wu <xia.wu@intel.com>");
1614 MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
1615 MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang@intel.com>");
1616 MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
1617 MODULE_LICENSE("GPL");
1618 MODULE_DESCRIPTION("Intel ipu processing system driver");