2 * Copyright (C) 2008-2009 QUALCOMM Incorporated.
5 /* FIXME: most allocations need not be GFP_ATOMIC */
6 /* FIXME: management of mutexes */
7 /* FIXME: msm_pmem_region_lookup return values */
8 /* FIXME: way too many copy to/from user */
9 /* FIXME: does region->active mean free */
10 /* FIXME: check limits on command lengths passed from userspace */
11 /* FIXME: __msm_release: which queues should we flush when opencnt != 0 */
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/sched.h>
18 #include <mach/board.h>
21 #include <linux/list.h>
22 #include <linux/uaccess.h>
23 #include <linux/android_pmem.h>
24 #include <linux/poll.h>
25 #include <media/msm_camera.h>
26 #include <mach/camera.h>
28 #define MSM_MAX_CAMERA_SENSORS 5
30 #define ERR_USER_COPY(to) pr_err("%s(%d): copy %s user\n", \
31 __func__, __LINE__, ((to) ? "to" : "from"))
32 #define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
33 #define ERR_COPY_TO_USER() ERR_USER_COPY(1)
35 static struct class *msm_class
;
36 static dev_t msm_devno
;
37 static LIST_HEAD(msm_sensors
);
39 #define __CONTAINS(r, v, l, field) ({ \
42 typeof(v) __e = __v + l; \
43 int res = __v >= __r->field && \
44 __e <= __r->field + __r->len; \
48 #define CONTAINS(r1, r2, field) ({ \
49 typeof(r2) __r2 = r2; \
50 __CONTAINS(r1, __r2->field, __r2->len, field); \
53 #define IN_RANGE(r, v, field) ({ \
56 int res = ((__vv >= __r->field) && \
57 (__vv < (__r->field + __r->len))); \
61 #define OVERLAPS(r1, r2, field) ({ \
62 typeof(r1) __r1 = r1; \
63 typeof(r2) __r2 = r2; \
64 typeof(__r2->field) __v = __r2->field; \
65 typeof(__v) __e = __v + __r2->len - 1; \
66 int res = (IN_RANGE(__r1, __v, field) || \
67 IN_RANGE(__r1, __e, field)); \
71 #define MSM_DRAIN_QUEUE_NOSYNC(sync, name) do { \
72 struct msm_queue_cmd *qcmd = NULL; \
73 CDBG("%s: draining queue "#name"\n", __func__); \
74 while (!list_empty(&(sync)->name)) { \
75 qcmd = list_first_entry(&(sync)->name, \
76 struct msm_queue_cmd, list); \
77 list_del_init(&qcmd->list); \
82 #define MSM_DRAIN_QUEUE(sync, name) do { \
83 unsigned long flags; \
84 spin_lock_irqsave(&(sync)->name##_lock, flags); \
85 MSM_DRAIN_QUEUE_NOSYNC(sync, name); \
86 spin_unlock_irqrestore(&(sync)->name##_lock, flags); \
89 static int check_overlap(struct hlist_head
*ptype
,
93 struct msm_pmem_region
*region
;
94 struct msm_pmem_region t
= { .paddr
= paddr
, .len
= len
};
95 struct hlist_node
*node
;
97 hlist_for_each_entry(region
, node
, ptype
, list
) {
98 if (CONTAINS(region
, &t
, paddr
) ||
99 CONTAINS(&t
, region
, paddr
) ||
100 OVERLAPS(region
, &t
, paddr
)) {
102 " region (PHYS %p len %ld)"
103 " clashes with registered region"
104 " (paddr %p len %ld)\n",
105 (void *)t
.paddr
, t
.len
,
106 (void *)region
->paddr
, region
->len
);
114 static int msm_pmem_table_add(struct hlist_head
*ptype
,
115 struct msm_pmem_info
*info
)
119 unsigned long vstart
;
122 struct msm_pmem_region
*region
;
124 rc
= get_pmem_file(info
->fd
, &paddr
, &vstart
, &len
, &file
);
126 pr_err("msm_pmem_table_add: get_pmem_file fd %d error %d\n",
131 if (check_overlap(ptype
, paddr
, len
) < 0)
134 CDBG("%s: type = %d, paddr = 0x%lx, vaddr = 0x%lx\n",
136 info
->type
, paddr
, (unsigned long)info
->vaddr
);
138 region
= kmalloc(sizeof(*region
), GFP_KERNEL
);
142 INIT_HLIST_NODE(®ion
->list
);
144 region
->type
= info
->type
;
145 region
->vaddr
= info
->vaddr
;
146 region
->paddr
= paddr
;
149 region
->y_off
= info
->y_off
;
150 region
->cbcr_off
= info
->cbcr_off
;
151 region
->fd
= info
->fd
;
152 region
->active
= info
->active
;
154 hlist_add_head(&(region
->list
), ptype
);
159 /* return of 0 means failure */
160 static uint8_t msm_pmem_region_lookup(struct hlist_head
*ptype
,
161 int pmem_type
, struct msm_pmem_region
*reg
, uint8_t maxcount
)
163 struct msm_pmem_region
*region
;
164 struct msm_pmem_region
*regptr
;
165 struct hlist_node
*node
, *n
;
171 hlist_for_each_entry_safe(region
, node
, n
, ptype
, list
) {
172 if (region
->type
== pmem_type
&& region
->active
) {
/* Physical-to-virtual lookup for a preview/frame buffer: walks the
 * sync->frame pmem region list and matches on (paddr + y_off) and the
 * cbcr address, returning the region's userspace vaddr and filling in
 * *yoff, *cbcroff and *fd for the caller.
 * NOTE(review): this span is an extraction artifact — interior source
 * lines (original lines 188, 191, 195-196, 199, 202-208) are missing,
 * so the full match condition and failure path are not visible here. */
184 static unsigned long msm_pmem_frame_ptov_lookup(struct msm_sync
*sync
,
185 unsigned long pyaddr
,
186 unsigned long pcbcraddr
,
187 uint32_t *yoff
, uint32_t *cbcroff
, int *fd
)
189 struct msm_pmem_region
*region
;
190 struct hlist_node
*node
, *n
;
192 hlist_for_each_entry_safe(region
, node
, n
, &sync
->frame
, list
) {
193 if (pyaddr
== (region
->paddr
+ region
->y_off
) &&
194 pcbcraddr
== (region
->paddr
+
197 /* offset since we could pass vaddr inside
198 * a registered pmem buffer
200 *yoff
= region
->y_off
;
201 *cbcroff
= region
->cbcr_off
;
204 return (unsigned long)(region
->vaddr
);
211 static unsigned long msm_pmem_stats_ptov_lookup(struct msm_sync
*sync
,
212 unsigned long addr
, int *fd
)
214 struct msm_pmem_region
*region
;
215 struct hlist_node
*node
, *n
;
217 hlist_for_each_entry_safe(region
, node
, n
, &sync
->stats
, list
) {
218 if (addr
== region
->paddr
&& region
->active
) {
219 /* offset since we could pass vaddr inside a
220 * registered pmem buffer */
223 return (unsigned long)(region
->vaddr
);
230 static unsigned long msm_pmem_frame_vtop_lookup(struct msm_sync
*sync
,
231 unsigned long buffer
,
232 uint32_t yoff
, uint32_t cbcroff
, int fd
)
234 struct msm_pmem_region
*region
;
235 struct hlist_node
*node
, *n
;
237 hlist_for_each_entry_safe(region
,
238 node
, n
, &sync
->frame
, list
) {
239 if (((unsigned long)(region
->vaddr
) == buffer
) &&
240 (region
->y_off
== yoff
) &&
241 (region
->cbcr_off
== cbcroff
) &&
242 (region
->fd
== fd
) &&
243 (region
->active
== 0)) {
246 return region
->paddr
;
253 static unsigned long msm_pmem_stats_vtop_lookup(
254 struct msm_sync
*sync
,
255 unsigned long buffer
,
258 struct msm_pmem_region
*region
;
259 struct hlist_node
*node
, *n
;
261 hlist_for_each_entry_safe(region
, node
, n
, &sync
->stats
, list
) {
262 if (((unsigned long)(region
->vaddr
) == buffer
) &&
263 (region
->fd
== fd
) && region
->active
== 0) {
265 return region
->paddr
;
272 static int __msm_pmem_table_del(struct msm_sync
*sync
,
273 struct msm_pmem_info
*pinfo
)
276 struct msm_pmem_region
*region
;
277 struct hlist_node
*node
, *n
;
279 switch (pinfo
->type
) {
280 case MSM_PMEM_OUTPUT1
:
281 case MSM_PMEM_OUTPUT2
:
282 case MSM_PMEM_THUMBAIL
:
283 case MSM_PMEM_MAINIMG
:
284 case MSM_PMEM_RAW_MAINIMG
:
285 hlist_for_each_entry_safe(region
, node
, n
,
286 &sync
->frame
, list
) {
288 if (pinfo
->type
== region
->type
&&
289 pinfo
->vaddr
== region
->vaddr
&&
290 pinfo
->fd
== region
->fd
) {
292 put_pmem_file(region
->file
);
298 case MSM_PMEM_AEC_AWB
:
300 hlist_for_each_entry_safe(region
, node
, n
,
301 &sync
->stats
, list
) {
303 if (pinfo
->type
== region
->type
&&
304 pinfo
->vaddr
== region
->vaddr
&&
305 pinfo
->fd
== region
->fd
) {
307 put_pmem_file(region
->file
);
/* ioctl-level wrapper: copies a struct msm_pmem_info from userspace and
 * delegates the actual region removal to __msm_pmem_table_del().
 * NOTE(review): extraction artifact — the -EFAULT error-return path
 * after ERR_COPY_FROM_USER() (original lines 327-329) is missing here. */
321 static int msm_pmem_table_del(struct msm_sync
*sync
, void __user
*arg
)
323 struct msm_pmem_info info
;
325 if (copy_from_user(&info
, arg
, sizeof(info
))) {
326 ERR_COPY_FROM_USER();
330 return __msm_pmem_table_del(sync
, &info
);
333 static int __msm_get_frame(struct msm_sync
*sync
,
334 struct msm_frame
*frame
)
339 struct msm_queue_cmd
*qcmd
= NULL
;
340 struct msm_vfe_phy_info
*pphy
;
342 spin_lock_irqsave(&sync
->prev_frame_q_lock
, flags
);
343 if (!list_empty(&sync
->prev_frame_q
)) {
344 qcmd
= list_first_entry(&sync
->prev_frame_q
,
345 struct msm_queue_cmd
, list
);
346 list_del_init(&qcmd
->list
);
348 spin_unlock_irqrestore(&sync
->prev_frame_q_lock
, flags
);
351 pr_err("%s: no preview frame.\n", __func__
);
355 pphy
= (struct msm_vfe_phy_info
*)(qcmd
->command
);
358 msm_pmem_frame_ptov_lookup(sync
,
360 pphy
->cbcr_phy
, &(frame
->y_off
),
361 &(frame
->cbcr_off
), &(frame
->fd
));
362 if (!frame
->buffer
) {
363 pr_err("%s: cannot get frame, invalid lookup address "
364 "y=%x cbcr=%x offset=%d\n",
372 CDBG("__msm_get_frame: y=0x%x, cbcr=0x%x, qcmd=0x%x, virt_addr=0x%x\n",
373 pphy
->y_phy
, pphy
->cbcr_phy
, (int) qcmd
, (int) frame
->buffer
);
379 static int msm_get_frame(struct msm_sync
*sync
, void __user
*arg
)
382 struct msm_frame frame
;
384 if (copy_from_user(&frame
,
386 sizeof(struct msm_frame
))) {
387 ERR_COPY_FROM_USER();
391 rc
= __msm_get_frame(sync
, &frame
);
396 if (frame
.croplen
> sync
->croplen
) {
397 pr_err("msm_get_frame: invalid frame croplen %d\n",
402 if (copy_to_user((void *)frame
.cropinfo
,
410 if (copy_to_user((void *)arg
,
411 &frame
, sizeof(struct msm_frame
))) {
416 CDBG("Got frame!!!\n");
/* Copies a camera_enable_cmd from userspace and, if the VFE driver
 * registered a vfe_enable hook, invokes it with the command.
 * Returns the hook's rc (logged via CDBG).
 * NOTE(review): extraction artifact — the copy_from_user error return
 * and the final return statement are missing from this span. */
421 static int msm_enable_vfe(struct msm_sync
*sync
, void __user
*arg
)
424 struct camera_enable_cmd cfg
;
426 if (copy_from_user(&cfg
,
428 sizeof(struct camera_enable_cmd
))) {
429 ERR_COPY_FROM_USER();
433 if (sync
->vfefn
.vfe_enable
)
434 rc
= sync
->vfefn
.vfe_enable(&cfg
);
436 CDBG("msm_enable_vfe: returned rc = %d\n", rc
);
/* Mirror of msm_enable_vfe(): copies a camera_enable_cmd from userspace
 * and invokes the registered vfe_disable hook (second argument NULL).
 * NOTE(review): extraction artifact — error-return and final return
 * lines are missing from this span. */
440 static int msm_disable_vfe(struct msm_sync
*sync
, void __user
*arg
)
443 struct camera_enable_cmd cfg
;
445 if (copy_from_user(&cfg
,
447 sizeof(struct camera_enable_cmd
))) {
448 ERR_COPY_FROM_USER();
452 if (sync
->vfefn
.vfe_disable
)
453 rc
= sync
->vfefn
.vfe_disable(&cfg
, NULL
);
455 CDBG("msm_disable_vfe: returned rc = %d\n", rc
);
459 static struct msm_queue_cmd
*__msm_control(struct msm_sync
*sync
,
460 struct msm_control_device_queue
*queue
,
461 struct msm_queue_cmd
*qcmd
,
467 spin_lock_irqsave(&sync
->msg_event_q_lock
, flags
);
468 list_add_tail(&qcmd
->list
, &sync
->msg_event_q
);
469 /* wake up config thread */
470 wake_up(&sync
->msg_event_wait
);
471 spin_unlock_irqrestore(&sync
->msg_event_q_lock
, flags
);
476 /* wait for config status */
477 rc
= wait_event_interruptible_timeout(
478 queue
->ctrl_status_wait
,
479 !list_empty_careful(&queue
->ctrl_status_q
),
481 if (list_empty_careful(&queue
->ctrl_status_q
)) {
485 pr_err("msm_control: wait_event error %d\n", rc
);
487 /* This is a bit scary. If we time out too early, we
488 * will free qcmd at the end of this function, and the
489 * dsp may do the same when it does respond, so we
490 * remove the message from the source queue.
492 pr_err("%s: error waiting for ctrl_status_q: %d\n",
494 spin_lock_irqsave(&sync
->msg_event_q_lock
, flags
);
495 list_del_init(&qcmd
->list
);
496 spin_unlock_irqrestore(&sync
->msg_event_q_lock
, flags
);
502 /* control command status is ready */
503 spin_lock_irqsave(&queue
->ctrl_status_q_lock
, flags
);
504 BUG_ON(list_empty(&queue
->ctrl_status_q
));
505 qcmd
= list_first_entry(&queue
->ctrl_status_q
,
506 struct msm_queue_cmd
, list
);
507 list_del_init(&qcmd
->list
);
508 spin_unlock_irqrestore(&queue
->ctrl_status_q_lock
, flags
);
513 static int msm_control(struct msm_control_device
*ctrl_pmsm
,
519 struct msm_sync
*sync
= ctrl_pmsm
->pmsm
->sync
;
520 struct msm_ctrl_cmd udata
, *ctrlcmd
;
521 struct msm_queue_cmd
*qcmd
= NULL
, *qcmd_temp
;
523 if (copy_from_user(&udata
, arg
, sizeof(struct msm_ctrl_cmd
))) {
524 ERR_COPY_FROM_USER();
529 qcmd
= kmalloc(sizeof(struct msm_queue_cmd
) +
530 sizeof(struct msm_ctrl_cmd
) + udata
.length
,
533 pr_err("msm_control: cannot allocate buffer\n");
538 qcmd
->type
= MSM_CAM_Q_CTRL
;
539 qcmd
->command
= ctrlcmd
= (struct msm_ctrl_cmd
*)(qcmd
+ 1);
541 ctrlcmd
->value
= ctrlcmd
+ 1;
544 if (copy_from_user(ctrlcmd
->value
,
545 udata
.value
, udata
.length
)) {
546 ERR_COPY_FROM_USER();
553 /* qcmd will be set to NULL */
554 qcmd
= __msm_control(sync
, NULL
, qcmd
, 0);
558 qcmd_temp
= __msm_control(sync
,
560 qcmd
, MAX_SCHEDULE_TIMEOUT
);
562 if (IS_ERR(qcmd_temp
)) {
563 rc
= PTR_ERR(qcmd_temp
);
569 void __user
*to
= udata
.value
;
570 udata
= *(struct msm_ctrl_cmd
*)qcmd
->command
;
571 if (udata
.length
> 0) {
582 if (copy_to_user((void *)arg
, &udata
,
583 sizeof(struct msm_ctrl_cmd
))) {
591 /* Note: if we get here as a result of an error, we will free the
592 * qcmd that we kmalloc() in this function. When we come here as
593 * a result of a successful completion, we are freeing the qcmd that
594 * we dequeued from queue->ctrl_status_q.
598 CDBG("msm_control: end rc = %d\n", rc
);
602 static int msm_get_stats(struct msm_sync
*sync
, void __user
*arg
)
608 struct msm_stats_event_ctrl se
;
610 struct msm_queue_cmd
*qcmd
= NULL
;
611 struct msm_ctrl_cmd
*ctrl
= NULL
;
612 struct msm_vfe_resp
*data
= NULL
;
613 struct msm_stats_buf stats
;
615 if (copy_from_user(&se
, arg
,
616 sizeof(struct msm_stats_event_ctrl
))) {
617 ERR_COPY_FROM_USER();
621 timeout
= (int)se
.timeout_ms
;
623 CDBG("msm_get_stats timeout %d\n", timeout
);
624 rc
= wait_event_interruptible_timeout(
625 sync
->msg_event_wait
,
626 !list_empty_careful(&sync
->msg_event_q
),
627 msecs_to_jiffies(timeout
));
628 if (list_empty_careful(&sync
->msg_event_q
)) {
632 pr_err("msm_get_stats error %d\n", rc
);
636 CDBG("msm_get_stats returned from wait: %d\n", rc
);
638 spin_lock_irqsave(&sync
->msg_event_q_lock
, flags
);
639 BUG_ON(list_empty(&sync
->msg_event_q
));
640 qcmd
= list_first_entry(&sync
->msg_event_q
,
641 struct msm_queue_cmd
, list
);
642 list_del_init(&qcmd
->list
);
643 spin_unlock_irqrestore(&sync
->msg_event_q_lock
, flags
);
645 CDBG("=== received from DSP === %d\n", qcmd
->type
);
647 switch (qcmd
->type
) {
648 case MSM_CAM_Q_VFE_EVT
:
649 case MSM_CAM_Q_VFE_MSG
:
650 data
= (struct msm_vfe_resp
*)(qcmd
->command
);
652 /* adsp event and message */
653 se
.resptype
= MSM_CAM_RESP_STAT_EVT_MSG
;
655 /* 0 - msg from aDSP, 1 - event from mARM */
656 se
.stats_event
.type
= data
->evt_msg
.type
;
657 se
.stats_event
.msg_id
= data
->evt_msg
.msg_id
;
658 se
.stats_event
.len
= data
->evt_msg
.len
;
660 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd
->type
);
661 CDBG("length = %d\n", se
.stats_event
.len
);
662 CDBG("msg_id = %d\n", se
.stats_event
.msg_id
);
664 if ((data
->type
== VFE_MSG_STATS_AF
) ||
665 (data
->type
== VFE_MSG_STATS_WE
)) {
668 msm_pmem_stats_ptov_lookup(sync
,
672 pr_err("%s: msm_pmem_stats_ptov_lookup error\n",
678 if (copy_to_user((void *)(se
.stats_event
.data
),
680 sizeof(struct msm_stats_buf
))) {
685 } else if ((data
->evt_msg
.len
> 0) &&
686 (data
->type
== VFE_MSG_GENERAL
)) {
687 if (copy_to_user((void *)(se
.stats_event
.data
),
689 data
->evt_msg
.len
)) {
693 } else if (data
->type
== VFE_MSG_OUTPUT1
||
694 data
->type
== VFE_MSG_OUTPUT2
) {
695 if (copy_to_user((void *)(se
.stats_event
.data
),
701 } else if (data
->type
== VFE_MSG_SNAPSHOT
&& sync
->pict_pp
) {
702 struct msm_postproc buf
;
703 struct msm_pmem_region region
;
704 buf
.fmnum
= msm_pmem_region_lookup(&sync
->frame
,
707 if (buf
.fmnum
== 1) {
708 buf
.fmain
.buffer
= (unsigned long)region
.vaddr
;
709 buf
.fmain
.y_off
= region
.y_off
;
710 buf
.fmain
.cbcr_off
= region
.cbcr_off
;
711 buf
.fmain
.fd
= region
.fd
;
713 buf
.fmnum
= msm_pmem_region_lookup(&sync
->frame
,
714 MSM_PMEM_RAW_MAINIMG
,
716 if (buf
.fmnum
== 1) {
717 buf
.fmain
.path
= MSM_FRAME_PREV_2
;
719 (unsigned long)region
.vaddr
;
720 buf
.fmain
.fd
= region
.fd
;
722 pr_err("%s: pmem lookup failed\n",
728 if (copy_to_user((void *)(se
.stats_event
.data
), &buf
,
734 CDBG("snapshot copy_to_user!\n");
739 /* control command from control thread */
740 ctrl
= (struct msm_ctrl_cmd
*)(qcmd
->command
);
742 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd
->type
);
743 CDBG("length = %d\n", ctrl
->length
);
745 if (ctrl
->length
> 0) {
746 if (copy_to_user((void *)(se
.ctrl_cmd
.value
),
755 se
.resptype
= MSM_CAM_RESP_CTRL
;
757 /* what to control */
758 se
.ctrl_cmd
.type
= ctrl
->type
;
759 se
.ctrl_cmd
.length
= ctrl
->length
;
760 se
.ctrl_cmd
.resp_fd
= ctrl
->resp_fd
;
763 case MSM_CAM_Q_V4L2_REQ
:
764 /* control command from v4l2 client */
765 ctrl
= (struct msm_ctrl_cmd
*)(qcmd
->command
);
767 CDBG("msm_get_stats, qcmd->type = %d\n", qcmd
->type
);
768 CDBG("length = %d\n", ctrl
->length
);
770 if (ctrl
->length
> 0) {
771 if (copy_to_user((void *)(se
.ctrl_cmd
.value
),
772 ctrl
->value
, ctrl
->length
)) {
779 /* 2 tells config thread this is v4l2 request */
780 se
.resptype
= MSM_CAM_RESP_V4L2
;
782 /* what to control */
783 se
.ctrl_cmd
.type
= ctrl
->type
;
784 se
.ctrl_cmd
.length
= ctrl
->length
;
790 } /* switch qcmd->type */
792 if (copy_to_user((void *)arg
, &se
, sizeof(se
))) {
800 CDBG("msm_get_stats: %d\n", rc
);
804 static int msm_ctrl_cmd_done(struct msm_control_device
*ctrl_pmsm
,
810 struct msm_ctrl_cmd udata
, *ctrlcmd
;
811 struct msm_queue_cmd
*qcmd
= NULL
;
813 if (copy_from_user(&udata
, arg
, sizeof(struct msm_ctrl_cmd
))) {
814 ERR_COPY_FROM_USER();
819 qcmd
= kmalloc(sizeof(struct msm_queue_cmd
) +
820 sizeof(struct msm_ctrl_cmd
) + udata
.length
,
827 qcmd
->command
= ctrlcmd
= (struct msm_ctrl_cmd
*)(qcmd
+ 1);
829 if (udata
.length
> 0) {
830 ctrlcmd
->value
= ctrlcmd
+ 1;
831 if (copy_from_user(ctrlcmd
->value
,
834 ERR_COPY_FROM_USER();
840 ctrlcmd
->value
= NULL
;
843 CDBG("msm_ctrl_cmd_done: end rc = %d\n", rc
);
845 /* wake up control thread */
846 spin_lock_irqsave(&ctrl_pmsm
->ctrl_q
.ctrl_status_q_lock
, flags
);
847 list_add_tail(&qcmd
->list
, &ctrl_pmsm
->ctrl_q
.ctrl_status_q
);
848 wake_up(&ctrl_pmsm
->ctrl_q
.ctrl_status_wait
);
849 spin_unlock_irqrestore(&ctrl_pmsm
->ctrl_q
.ctrl_status_q_lock
, flags
);
855 static int msm_config_vfe(struct msm_sync
*sync
, void __user
*arg
)
857 struct msm_vfe_cfg_cmd cfgcmd
;
858 struct msm_pmem_region region
[8];
859 struct axidata axi_data
;
863 memset(&axi_data
, 0, sizeof(axi_data
));
865 if (copy_from_user(&cfgcmd
, arg
, sizeof(cfgcmd
))) {
866 ERR_COPY_FROM_USER();
870 switch (cfgcmd
.cmd_type
) {
871 case CMD_STATS_ENABLE
:
873 msm_pmem_region_lookup(&sync
->stats
,
874 MSM_PMEM_AEC_AWB
, ®ion
[0],
875 NUM_WB_EXP_STAT_OUTPUT_BUFFERS
);
876 if (!axi_data
.bufnum1
) {
877 pr_err("%s: pmem region lookup error\n", __func__
);
880 axi_data
.region
= ®ion
[0];
883 case CMD_STATS_AF_ENABLE
:
885 msm_pmem_region_lookup(&sync
->stats
,
886 MSM_PMEM_AF
, ®ion
[0],
887 NUM_AF_STAT_OUTPUT_BUFFERS
);
888 if (!axi_data
.bufnum1
) {
889 pr_err("%s: pmem region lookup error\n", __func__
);
892 axi_data
.region
= ®ion
[0];
896 case CMD_STATS_DISABLE
:
899 pr_err("%s: unknown command type %d\n",
900 __func__
, cfgcmd
.cmd_type
);
905 if (sync
->vfefn
.vfe_config
)
906 rc
= sync
->vfefn
.vfe_config(&cfgcmd
, data
);
911 static int msm_frame_axi_cfg(struct msm_sync
*sync
,
912 struct msm_vfe_cfg_cmd
*cfgcmd
)
915 struct axidata axi_data
;
916 void *data
= &axi_data
;
917 struct msm_pmem_region region
[8];
920 memset(&axi_data
, 0, sizeof(axi_data
));
922 switch (cfgcmd
->cmd_type
) {
923 case CMD_AXI_CFG_OUT1
:
924 pmem_type
= MSM_PMEM_OUTPUT1
;
926 msm_pmem_region_lookup(&sync
->frame
, pmem_type
,
928 if (!axi_data
.bufnum1
) {
929 pr_err("%s: pmem region lookup error\n", __func__
);
934 case CMD_AXI_CFG_OUT2
:
935 pmem_type
= MSM_PMEM_OUTPUT2
;
937 msm_pmem_region_lookup(&sync
->frame
, pmem_type
,
939 if (!axi_data
.bufnum2
) {
940 pr_err("%s: pmem region lookup error\n", __func__
);
945 case CMD_AXI_CFG_SNAP_O1_AND_O2
:
946 pmem_type
= MSM_PMEM_THUMBAIL
;
948 msm_pmem_region_lookup(&sync
->frame
, pmem_type
,
950 if (!axi_data
.bufnum1
) {
951 pr_err("%s: pmem region lookup error\n", __func__
);
955 pmem_type
= MSM_PMEM_MAINIMG
;
957 msm_pmem_region_lookup(&sync
->frame
, pmem_type
,
958 ®ion
[axi_data
.bufnum1
], 8);
959 if (!axi_data
.bufnum2
) {
960 pr_err("%s: pmem region lookup error\n", __func__
);
965 case CMD_RAW_PICT_AXI_CFG
:
966 pmem_type
= MSM_PMEM_RAW_MAINIMG
;
968 msm_pmem_region_lookup(&sync
->frame
, pmem_type
,
970 if (!axi_data
.bufnum2
) {
971 pr_err("%s: pmem region lookup error\n", __func__
);
981 pr_err("%s: unknown command type %d\n",
982 __func__
, cfgcmd
->cmd_type
);
986 axi_data
.region
= ®ion
[0];
988 /* send the AXI configuration command to driver */
989 if (sync
->vfefn
.vfe_config
)
990 rc
= sync
->vfefn
.vfe_config(cfgcmd
, data
);
995 static int msm_get_sensor_info(struct msm_sync
*sync
, void __user
*arg
)
998 struct msm_camsensor_info info
;
999 struct msm_camera_sensor_info
*sdata
;
1001 if (copy_from_user(&info
,
1003 sizeof(struct msm_camsensor_info
))) {
1004 ERR_COPY_FROM_USER();
1008 sdata
= sync
->pdev
->dev
.platform_data
;
1009 CDBG("sensor_name %s\n", sdata
->sensor_name
);
1011 memcpy(&info
.name
[0],
1014 info
.flash_enabled
= sdata
->flash_type
!= MSM_CAMERA_FLASH_NONE
;
1016 /* copy back to user space */
1017 if (copy_to_user((void *)arg
,
1019 sizeof(struct msm_camsensor_info
))) {
1027 static int __msm_put_frame_buf(struct msm_sync
*sync
,
1028 struct msm_frame
*pb
)
1031 struct msm_vfe_cfg_cmd cfgcmd
;
1035 pphy
= msm_pmem_frame_vtop_lookup(sync
,
1037 pb
->y_off
, pb
->cbcr_off
, pb
->fd
);
1040 CDBG("rel: vaddr = 0x%lx, paddr = 0x%lx\n",
1042 cfgcmd
.cmd_type
= CMD_FRAME_BUF_RELEASE
;
1043 cfgcmd
.value
= (void *)pb
;
1044 if (sync
->vfefn
.vfe_config
)
1045 rc
= sync
->vfefn
.vfe_config(&cfgcmd
, &pphy
);
1047 pr_err("%s: msm_pmem_frame_vtop_lookup failed\n",
/* ioctl-level wrapper: copies a struct msm_frame from userspace and
 * hands it to __msm_put_frame_buf() to release the preview buffer back
 * to the VFE.
 * NOTE(review): extraction artifact — the -EFAULT return after
 * ERR_COPY_FROM_USER() (original lines 1063-1064) is missing here. */
1055 static int msm_put_frame_buffer(struct msm_sync
*sync
, void __user
*arg
)
1057 struct msm_frame buf_t
;
1059 if (copy_from_user(&buf_t
,
1061 sizeof(struct msm_frame
))) {
1062 ERR_COPY_FROM_USER();
1066 return __msm_put_frame_buf(sync
, &buf_t
);
1069 static int __msm_register_pmem(struct msm_sync
*sync
,
1070 struct msm_pmem_info
*pinfo
)
1074 switch (pinfo
->type
) {
1075 case MSM_PMEM_OUTPUT1
:
1076 case MSM_PMEM_OUTPUT2
:
1077 case MSM_PMEM_THUMBAIL
:
1078 case MSM_PMEM_MAINIMG
:
1079 case MSM_PMEM_RAW_MAINIMG
:
1080 rc
= msm_pmem_table_add(&sync
->frame
, pinfo
);
1083 case MSM_PMEM_AEC_AWB
:
1085 rc
= msm_pmem_table_add(&sync
->stats
, pinfo
);
/* ioctl-level wrapper: copies a struct msm_pmem_info from userspace and
 * delegates registration to __msm_register_pmem(), which routes the
 * region into sync->frame or sync->stats by pmem type.
 * NOTE(review): extraction artifact — the -EFAULT return after
 * ERR_COPY_FROM_USER() (original lines 1102-1103) is missing here. */
1096 static int msm_register_pmem(struct msm_sync
*sync
, void __user
*arg
)
1098 struct msm_pmem_info info
;
1100 if (copy_from_user(&info
, arg
, sizeof(info
))) {
1101 ERR_COPY_FROM_USER();
1105 return __msm_register_pmem(sync
, &info
);
1108 static int msm_stats_axi_cfg(struct msm_sync
*sync
,
1109 struct msm_vfe_cfg_cmd
*cfgcmd
)
1112 struct axidata axi_data
;
1113 void *data
= &axi_data
;
1115 struct msm_pmem_region region
[3];
1116 int pmem_type
= MSM_PMEM_MAX
;
1118 memset(&axi_data
, 0, sizeof(axi_data
));
1120 switch (cfgcmd
->cmd_type
) {
1121 case CMD_STATS_AXI_CFG
:
1122 pmem_type
= MSM_PMEM_AEC_AWB
;
1124 case CMD_STATS_AF_AXI_CFG
:
1125 pmem_type
= MSM_PMEM_AF
;
1131 pr_err("%s: unknown command type %d\n",
1132 __func__
, cfgcmd
->cmd_type
);
1136 if (cfgcmd
->cmd_type
!= CMD_GENERAL
) {
1138 msm_pmem_region_lookup(&sync
->stats
, pmem_type
,
1139 ®ion
[0], NUM_WB_EXP_STAT_OUTPUT_BUFFERS
);
1140 if (!axi_data
.bufnum1
) {
1141 pr_err("%s: pmem region lookup error\n", __func__
);
1144 axi_data
.region
= ®ion
[0];
1147 /* send the AEC/AWB STATS configuration command to driver */
1148 if (sync
->vfefn
.vfe_config
)
1149 rc
= sync
->vfefn
.vfe_config(cfgcmd
, &axi_data
);
1154 static int msm_put_stats_buffer(struct msm_sync
*sync
, void __user
*arg
)
1158 struct msm_stats_buf buf
;
1160 struct msm_vfe_cfg_cmd cfgcmd
;
1162 if (copy_from_user(&buf
, arg
,
1163 sizeof(struct msm_stats_buf
))) {
1164 ERR_COPY_FROM_USER();
1168 CDBG("msm_put_stats_buffer\n");
1169 pphy
= msm_pmem_stats_vtop_lookup(sync
, buf
.buffer
, buf
.fd
);
1172 if (buf
.type
== STAT_AEAW
)
1173 cfgcmd
.cmd_type
= CMD_STATS_BUF_RELEASE
;
1174 else if (buf
.type
== STAT_AF
)
1175 cfgcmd
.cmd_type
= CMD_STATS_AF_BUF_RELEASE
;
1177 pr_err("%s: invalid buf type %d\n",
1184 cfgcmd
.value
= (void *)&buf
;
1186 if (sync
->vfefn
.vfe_config
) {
1187 rc
= sync
->vfefn
.vfe_config(&cfgcmd
, &pphy
);
1189 pr_err("msm_put_stats_buffer: "\
1190 "vfe_config err %d\n", rc
);
1192 pr_err("msm_put_stats_buffer: vfe_config is NULL\n");
1194 pr_err("msm_put_stats_buffer: NULL physical address\n");
/* Dispatches an AXI configuration command copied from userspace:
 * frame-path commands (OUT1/OUT2/SNAP/RAW) go to msm_frame_axi_cfg(),
 * stats-path commands (AEC-AWB/AF) go to msm_stats_axi_cfg().
 * NOTE(review): extraction artifact — the copy_from_user error return
 * and the default: branch of the switch are missing from this span. */
1202 static int msm_axi_config(struct msm_sync
*sync
, void __user
*arg
)
1204 struct msm_vfe_cfg_cmd cfgcmd
;
1206 if (copy_from_user(&cfgcmd
, arg
, sizeof(cfgcmd
))) {
1207 ERR_COPY_FROM_USER();
1211 switch (cfgcmd
.cmd_type
) {
1212 case CMD_AXI_CFG_OUT1
:
1213 case CMD_AXI_CFG_OUT2
:
1214 case CMD_AXI_CFG_SNAP_O1_AND_O2
:
1215 case CMD_RAW_PICT_AXI_CFG
:
1216 return msm_frame_axi_cfg(sync
, &cfgcmd
);
1218 case CMD_STATS_AXI_CFG
:
1219 case CMD_STATS_AF_AXI_CFG
:
1220 return msm_stats_axi_cfg(sync
, &cfgcmd
);
1223 pr_err("%s: unknown command type %d\n",
1232 static int __msm_get_pic(struct msm_sync
*sync
, struct msm_ctrl_cmd
*ctrl
)
1234 unsigned long flags
;
1238 struct msm_queue_cmd
*qcmd
= NULL
;
1240 tm
= (int)ctrl
->timeout_ms
;
1242 rc
= wait_event_interruptible_timeout(
1243 sync
->pict_frame_wait
,
1244 !list_empty_careful(&sync
->pict_frame_q
),
1245 msecs_to_jiffies(tm
));
1246 if (list_empty_careful(&sync
->pict_frame_q
)) {
1250 pr_err("msm_camera_get_picture, rc = %d\n", rc
);
1255 spin_lock_irqsave(&sync
->pict_frame_q_lock
, flags
);
1256 BUG_ON(list_empty(&sync
->pict_frame_q
));
1257 qcmd
= list_first_entry(&sync
->pict_frame_q
,
1258 struct msm_queue_cmd
, list
);
1259 list_del_init(&qcmd
->list
);
1260 spin_unlock_irqrestore(&sync
->pict_frame_q_lock
, flags
);
1262 if (qcmd
->command
!= NULL
) {
1263 struct msm_ctrl_cmd
*q
=
1264 (struct msm_ctrl_cmd
*)qcmd
->command
;
1265 ctrl
->type
= q
->type
;
1266 ctrl
->status
= q
->status
;
1276 static int msm_get_pic(struct msm_sync
*sync
, void __user
*arg
)
1278 struct msm_ctrl_cmd ctrlcmd_t
;
1281 if (copy_from_user(&ctrlcmd_t
,
1283 sizeof(struct msm_ctrl_cmd
))) {
1284 ERR_COPY_FROM_USER();
1288 rc
= __msm_get_pic(sync
, &ctrlcmd_t
);
1292 if (sync
->croplen
) {
1293 if (ctrlcmd_t
.length
< sync
->croplen
) {
1294 pr_err("msm_get_pic: invalid len %d\n",
1298 if (copy_to_user(ctrlcmd_t
.value
,
1306 if (copy_to_user((void *)arg
,
1308 sizeof(struct msm_ctrl_cmd
))) {
1315 static int msm_set_crop(struct msm_sync
*sync
, void __user
*arg
)
1317 struct crop_info crop
;
1319 if (copy_from_user(&crop
,
1321 sizeof(struct crop_info
))) {
1322 ERR_COPY_FROM_USER();
1326 if (!sync
->croplen
) {
1327 sync
->cropinfo
= kmalloc(crop
.len
, GFP_KERNEL
);
1328 if (!sync
->cropinfo
)
1330 } else if (sync
->croplen
< crop
.len
)
1333 if (copy_from_user(sync
->cropinfo
,
1336 ERR_COPY_FROM_USER();
1337 kfree(sync
->cropinfo
);
1341 sync
->croplen
= crop
.len
;
1346 static int msm_pict_pp_done(struct msm_sync
*sync
, void __user
*arg
)
1348 struct msm_ctrl_cmd udata
;
1349 struct msm_ctrl_cmd
*ctrlcmd
= NULL
;
1350 struct msm_queue_cmd
*qcmd
= NULL
;
1351 unsigned long flags
;
1357 if (copy_from_user(&udata
, arg
, sizeof(struct msm_ctrl_cmd
))) {
1358 ERR_COPY_FROM_USER();
1363 qcmd
= kmalloc(sizeof(struct msm_queue_cmd
) +
1364 sizeof(struct msm_ctrl_cmd
),
1371 qcmd
->type
= MSM_CAM_Q_VFE_MSG
;
1372 qcmd
->command
= ctrlcmd
= (struct msm_ctrl_cmd
*)(qcmd
+ 1);
1373 memset(ctrlcmd
, 0, sizeof(struct msm_ctrl_cmd
));
1374 ctrlcmd
->type
= udata
.type
;
1375 ctrlcmd
->status
= udata
.status
;
1377 spin_lock_irqsave(&sync
->pict_frame_q_lock
, flags
);
1378 list_add_tail(&qcmd
->list
, &sync
->pict_frame_q
);
1379 spin_unlock_irqrestore(&sync
->pict_frame_q_lock
, flags
);
1380 wake_up(&sync
->pict_frame_wait
);
1386 static long msm_ioctl_common(struct msm_device
*pmsm
,
1390 CDBG("msm_ioctl_common\n");
1392 case MSM_CAM_IOCTL_REGISTER_PMEM
:
1393 return msm_register_pmem(pmsm
->sync
, argp
);
1394 case MSM_CAM_IOCTL_UNREGISTER_PMEM
:
1395 return msm_pmem_table_del(pmsm
->sync
, argp
);
1401 static long msm_ioctl_config(struct file
*filep
, unsigned int cmd
,
1405 void __user
*argp
= (void __user
*)arg
;
1406 struct msm_device
*pmsm
= filep
->private_data
;
1408 CDBG("msm_ioctl_config cmd = %d\n", _IOC_NR(cmd
));
1411 case MSM_CAM_IOCTL_GET_SENSOR_INFO
:
1412 rc
= msm_get_sensor_info(pmsm
->sync
, argp
);
1415 case MSM_CAM_IOCTL_CONFIG_VFE
:
1416 /* Coming from config thread for update */
1417 rc
= msm_config_vfe(pmsm
->sync
, argp
);
1420 case MSM_CAM_IOCTL_GET_STATS
:
1421 /* Coming from config thread wait
1422 * for vfe statistics and control requests */
1423 rc
= msm_get_stats(pmsm
->sync
, argp
);
1426 case MSM_CAM_IOCTL_ENABLE_VFE
:
1427 /* This request comes from control thread:
1428 * enable either QCAMTASK or VFETASK */
1429 rc
= msm_enable_vfe(pmsm
->sync
, argp
);
1432 case MSM_CAM_IOCTL_DISABLE_VFE
:
1433 /* This request comes from control thread:
1434 * disable either QCAMTASK or VFETASK */
1435 rc
= msm_disable_vfe(pmsm
->sync
, argp
);
1438 case MSM_CAM_IOCTL_VFE_APPS_RESET
:
1439 msm_camio_vfe_blk_reset();
1443 case MSM_CAM_IOCTL_RELEASE_STATS_BUFFER
:
1444 rc
= msm_put_stats_buffer(pmsm
->sync
, argp
);
1447 case MSM_CAM_IOCTL_AXI_CONFIG
:
1448 rc
= msm_axi_config(pmsm
->sync
, argp
);
1451 case MSM_CAM_IOCTL_SET_CROP
:
1452 rc
= msm_set_crop(pmsm
->sync
, argp
);
1455 case MSM_CAM_IOCTL_PICT_PP
: {
1457 if (copy_from_user(&enable
, argp
, sizeof(enable
))) {
1458 ERR_COPY_FROM_USER();
1461 pmsm
->sync
->pict_pp
= enable
;
1467 case MSM_CAM_IOCTL_PICT_PP_DONE
:
1468 rc
= msm_pict_pp_done(pmsm
->sync
, argp
);
1471 case MSM_CAM_IOCTL_SENSOR_IO_CFG
:
1472 rc
= pmsm
->sync
->sctrl
.s_config(argp
);
1475 case MSM_CAM_IOCTL_FLASH_LED_CFG
: {
1477 if (copy_from_user(&led_state
, argp
, sizeof(led_state
))) {
1478 ERR_COPY_FROM_USER();
1481 rc
= msm_camera_flash_set_led_state(led_state
);
1486 rc
= msm_ioctl_common(pmsm
, cmd
, argp
);
1490 CDBG("msm_ioctl_config cmd = %d DONE\n", _IOC_NR(cmd
));
1494 static int msm_unblock_poll_frame(struct msm_sync
*);
1496 static long msm_ioctl_frame(struct file
*filep
, unsigned int cmd
,
1500 void __user
*argp
= (void __user
*)arg
;
1501 struct msm_device
*pmsm
= filep
->private_data
;
1505 case MSM_CAM_IOCTL_GETFRAME
:
1506 /* Coming from frame thread to get frame
1507 * after SELECT is done */
1508 rc
= msm_get_frame(pmsm
->sync
, argp
);
1510 case MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER
:
1511 rc
= msm_put_frame_buffer(pmsm
->sync
, argp
);
1513 case MSM_CAM_IOCTL_UNBLOCK_POLL_FRAME
:
1514 rc
= msm_unblock_poll_frame(pmsm
->sync
);
1524 static long msm_ioctl_control(struct file
*filep
, unsigned int cmd
,
1528 void __user
*argp
= (void __user
*)arg
;
1529 struct msm_control_device
*ctrl_pmsm
= filep
->private_data
;
1530 struct msm_device
*pmsm
= ctrl_pmsm
->pmsm
;
1533 case MSM_CAM_IOCTL_CTRL_COMMAND
:
1534 /* Coming from control thread, may need to wait for
1536 rc
= msm_control(ctrl_pmsm
, 1, argp
);
1538 case MSM_CAM_IOCTL_CTRL_COMMAND_2
:
1539 /* Sends a message, returns immediately */
1540 rc
= msm_control(ctrl_pmsm
, 0, argp
);
1542 case MSM_CAM_IOCTL_CTRL_CMD_DONE
:
1543 /* Config thread calls the control thread to notify it
1544 * of the result of a MSM_CAM_IOCTL_CTRL_COMMAND.
1546 rc
= msm_ctrl_cmd_done(ctrl_pmsm
, argp
);
1548 case MSM_CAM_IOCTL_GET_PICTURE
:
1549 rc
= msm_get_pic(pmsm
->sync
, argp
);
1552 rc
= msm_ioctl_common(pmsm
, cmd
, argp
);
1559 static int __msm_release(struct msm_sync
*sync
)
1561 struct msm_pmem_region
*region
;
1562 struct hlist_node
*hnode
;
1563 struct hlist_node
*n
;
1565 mutex_lock(&sync
->lock
);
1569 if (!sync
->opencnt
) {
1570 /* need to clean up system resource */
1571 if (sync
->vfefn
.vfe_release
)
1572 sync
->vfefn
.vfe_release(sync
->pdev
);
1574 if (sync
->cropinfo
) {
1575 kfree(sync
->cropinfo
);
1576 sync
->cropinfo
= NULL
;
1580 hlist_for_each_entry_safe(region
, hnode
, n
,
1581 &sync
->frame
, list
) {
1583 put_pmem_file(region
->file
);
1587 hlist_for_each_entry_safe(region
, hnode
, n
,
1588 &sync
->stats
, list
) {
1590 put_pmem_file(region
->file
);
1594 MSM_DRAIN_QUEUE(sync
, msg_event_q
);
1595 MSM_DRAIN_QUEUE(sync
, prev_frame_q
);
1596 MSM_DRAIN_QUEUE(sync
, pict_frame_q
);
1598 sync
->sctrl
.s_release();
1600 sync
->apps_id
= NULL
;
1601 CDBG("msm_release completed!\n");
1603 mutex_unlock(&sync
->lock
);
1608 static int msm_release_config(struct inode
*node
, struct file
*filep
)
1611 struct msm_device
*pmsm
= filep
->private_data
;
1612 printk("msm_camera: RELEASE %s\n", filep
->f_path
.dentry
->d_name
.name
);
1613 rc
= __msm_release(pmsm
->sync
);
1614 atomic_set(&pmsm
->opened
, 0);
1618 static int msm_release_control(struct inode
*node
, struct file
*filep
)
1621 struct msm_control_device
*ctrl_pmsm
= filep
->private_data
;
1622 struct msm_device
*pmsm
= ctrl_pmsm
->pmsm
;
1623 printk(KERN_INFO
"msm_camera: RELEASE %s\n",
1624 filep
->f_path
.dentry
->d_name
.name
);
1625 rc
= __msm_release(pmsm
->sync
);
1627 MSM_DRAIN_QUEUE(&ctrl_pmsm
->ctrl_q
, ctrl_status_q
);
1628 MSM_DRAIN_QUEUE(pmsm
->sync
, pict_frame_q
);
1634 static int msm_release_frame(struct inode
*node
, struct file
*filep
)
1637 struct msm_device
*pmsm
= filep
->private_data
;
1638 printk(KERN_INFO
"msm_camera: RELEASE %s\n",
1639 filep
->f_path
.dentry
->d_name
.name
);
1640 rc
= __msm_release(pmsm
->sync
);
1642 MSM_DRAIN_QUEUE(pmsm
->sync
, prev_frame_q
);
1643 atomic_set(&pmsm
->opened
, 0);
1648 static int msm_unblock_poll_frame(struct msm_sync
*sync
)
1650 unsigned long flags
;
1651 CDBG("msm_unblock_poll_frame\n");
1652 spin_lock_irqsave(&sync
->prev_frame_q_lock
, flags
);
1653 sync
->unblock_poll_frame
= 1;
1654 wake_up(&sync
->prev_frame_wait
);
1655 spin_unlock_irqrestore(&sync
->prev_frame_q_lock
, flags
);
1659 static unsigned int __msm_poll_frame(struct msm_sync
*sync
,
1661 struct poll_table_struct
*pll_table
)
1664 unsigned long flags
;
1666 poll_wait(filep
, &sync
->prev_frame_wait
, pll_table
);
1668 spin_lock_irqsave(&sync
->prev_frame_q_lock
, flags
);
1669 if (!list_empty_careful(&sync
->prev_frame_q
))
1671 rc
= POLLIN
| POLLRDNORM
;
1672 if (sync
->unblock_poll_frame
) {
1673 CDBG("%s: sync->unblock_poll_frame is true\n", __func__
);
1675 sync
->unblock_poll_frame
= 0;
1677 spin_unlock_irqrestore(&sync
->prev_frame_q_lock
, flags
);
1682 static unsigned int msm_poll_frame(struct file
*filep
,
1683 struct poll_table_struct
*pll_table
)
1685 struct msm_device
*pmsm
= filep
->private_data
;
1686 return __msm_poll_frame(pmsm
->sync
, filep
, pll_table
);
1690 * This function executes in interrupt context.
1693 static void *msm_vfe_sync_alloc(int size
,
1694 void *syncdata
__attribute__((unused
)))
1696 struct msm_queue_cmd
*qcmd
=
1697 kmalloc(sizeof(struct msm_queue_cmd
) + size
, GFP_ATOMIC
);
1698 return qcmd
? qcmd
+ 1 : NULL
;
/*
 * VFE -> driver callback (the surrounding comment fragment states it
 * runs in interrupt context; the allocations visible in the original
 * use GFP_ATOMIC elsewhere in this file). Routes preview frames to
 * prev_frame_q, snapshot notifications to pict_frame_q, and always
 * forwards the raw event to msg_event_q for the config thread.
 *
 * NOTE(review): this chunk is a corrupted extraction — braces, error
 * checks, `break` statements and some assignments are missing, so the
 * code below is intentionally left byte-identical; only comments were
 * added.
 */
1702 * This function executes in interrupt context.
1705 static void msm_vfe_sync(struct msm_vfe_resp
*vdata
,
1706 enum msm_queue qtype
, void *syncdata
)
1708 struct msm_queue_cmd
*qcmd
= NULL
;
1709 struct msm_queue_cmd
*qcmd_frame
= NULL
;
1710 struct msm_vfe_phy_info
*fphy
;
1712 unsigned long flags
;
1713 struct msm_sync
*sync
= (struct msm_sync
*)syncdata
;
/* Bail-out message when the opaque syncdata context is missing. */
1715 pr_err("msm_camera: no context in dsp callback.\n");
/* Recover the msm_queue_cmd header allocated by msm_vfe_sync_alloc()
 * (which returned qcmd + 1 as the payload). */
1719 qcmd
= ((struct msm_queue_cmd
*)vdata
) - 1;
1722 if (qtype
== MSM_CAM_Q_VFE_MSG
) {
1723 switch (vdata
->type
) {
/* Preview output paths: wrap the phy info in a fresh queue command
 * and hand it to the frame thread. */
1724 case VFE_MSG_OUTPUT1
:
1725 case VFE_MSG_OUTPUT2
:
1727 kmalloc(sizeof(struct msm_queue_cmd
) +
1728 sizeof(struct msm_vfe_phy_info
),
/* Payload area sits immediately after the header. */
1732 fphy
= (struct msm_vfe_phy_info
*)(qcmd_frame
+ 1);
1735 qcmd_frame
->type
= MSM_CAM_Q_VFE_MSG
;
1736 qcmd_frame
->command
= fphy
;
1738 CDBG("qcmd_frame= 0x%x phy_y= 0x%x, phy_cbcr= 0x%x\n",
1739 (int) qcmd_frame
, fphy
->y_phy
, fphy
->cbcr_phy
);
/* Publish the preview frame and wake any poller/reader. */
1741 spin_lock_irqsave(&sync
->prev_frame_q_lock
, flags
);
1742 list_add_tail(&qcmd_frame
->list
, &sync
->prev_frame_q
);
1743 wake_up(&sync
->prev_frame_wait
);
1744 spin_unlock_irqrestore(&sync
->prev_frame_q_lock
, flags
);
1745 CDBG("woke up frame thread\n");
/* Snapshot completion: queue an empty command for the picture
 * thread. */
1747 case VFE_MSG_SNAPSHOT
:
1751 CDBG("snapshot pp = %d\n", sync
->pict_pp
);
1753 kmalloc(sizeof(struct msm_queue_cmd
),
1757 qcmd_frame
->type
= MSM_CAM_Q_VFE_MSG
;
1758 qcmd_frame
->command
= NULL
;
1759 spin_lock_irqsave(&sync
->pict_frame_q_lock
,
1761 list_add_tail(&qcmd_frame
->list
, &sync
->pict_frame_q
);
1762 wake_up(&sync
->pict_frame_wait
);
1763 spin_unlock_irqrestore(&sync
->pict_frame_q_lock
, flags
);
1764 CDBG("woke up picture thread\n");
/* Any other VFE message type is logged but not specially routed. */
1767 CDBG("%s: qtype = %d not handled\n",
1768 __func__
, vdata
->type
);
/* Regardless of type, forward the raw vdata to the config thread's
 * event queue. */
1773 qcmd
->command
= (void *)vdata
;
1774 CDBG("vdata->type = %d\n", vdata
->type
);
1776 spin_lock_irqsave(&sync
->msg_event_q_lock
, flags
);
1777 list_add_tail(&qcmd
->list
, &sync
->msg_event_q
);
1778 wake_up(&sync
->msg_event_wait
);
1779 spin_unlock_irqrestore(&sync
->msg_event_q_lock
, flags
);
1780 CDBG("woke up config thread\n");
1787 static struct msm_vfe_callback msm_vfe_s
= {
1788 .vfe_resp
= msm_vfe_sync
,
1789 .vfe_alloc
= msm_vfe_sync_alloc
,
/*
 * Core open path shared by all device nodes: records the owning
 * application id, and on the first open initializes the VFE callbacks
 * (msm_camvfe_fn_init + vfe_init with msm_vfe_s), runs the sensor's
 * s_init, and resets the pmem hlists and the poll-unblock flag.
 *
 * NOTE(review): corrupted extraction — braces, error gotos, the
 * opencnt increment and returns are missing; code left byte-identical,
 * comments only.
 */
1792 static int __msm_open(struct msm_sync
*sync
, const char *const apps_id
)
1796 mutex_lock(&sync
->lock
);
/* Reject the open when a different application already owns this
 * sensor. */
1797 if (sync
->apps_id
&& strcmp(sync
->apps_id
, apps_id
)) {
1798 pr_err("msm_camera(%s): sensor %s is already opened for %s\n",
1800 sync
->sdata
->sensor_name
,
1806 sync
->apps_id
= apps_id
;
/* First opener brings the hardware up. */
1808 if (!sync
->opencnt
) {
1810 msm_camvfe_fn_init(&sync
->vfefn
, sync
);
1811 if (sync
->vfefn
.vfe_init
) {
1812 rc
= sync
->vfefn
.vfe_init(&msm_vfe_s
,
1815 pr_err("vfe_init failed at %d\n", rc
);
/* Sensor-specific init supplied at probe time. */
1818 rc
= sync
->sctrl
.s_init(sync
->sdata
);
1820 pr_err("sensor init failed: %d\n", rc
);
1824 pr_err("no sensor init func\n");
/* Fresh pmem bookkeeping and poll state for this session. */
1830 INIT_HLIST_HEAD(&sync
->frame
);
1831 INIT_HLIST_HEAD(&sync
->stats
);
1832 sync
->unblock_poll_frame
= 0;
1838 mutex_unlock(&sync
->lock
);
/*
 * Shared open() implementation: locates the msm_device from the cdev,
 * enforces exclusive open when `once` is set (via atomic_cmpxchg on
 * pmsm->opened), marks the file non-seekable, runs __msm_open, and
 * stashes the device in file->private_data.
 *
 * NOTE(review): corrupted extraction — the `int once` parameter line,
 * braces, error returns and rc declaration are missing; code left
 * byte-identical, comments only.
 */
1842 static int msm_open_common(struct inode
*inode
, struct file
*filep
,
1846 struct msm_device
*pmsm
=
1847 container_of(inode
->i_cdev
, struct msm_device
, cdev
);
1849 CDBG("msm_camera: open %s\n", filep
->f_path
.dentry
->d_name
.name
);
/* Exclusive-open check: cmpxchg 0 -> 1 fails if already opened. */
1851 if (atomic_cmpxchg(&pmsm
->opened
, 0, 1) && once
) {
1852 pr_err("msm_camera: %s is already opened.\n",
1853 filep
->f_path
.dentry
->d_name
.name
);
1857 rc
= nonseekable_open(inode
, filep
);
1859 pr_err("msm_open: nonseekable_open error %d\n", rc
);
1863 rc
= __msm_open(pmsm
->sync
, MSM_APPS_ID_PROP
);
1867 filep
->private_data
= pmsm
;
1869 CDBG("msm_open() open: rc = %d\n", rc
);
/*
 * open() fops entry: delegates to msm_open_common with once=1, i.e.
 * an exclusive open of the node.
 */
static int msm_open(struct inode *inode, struct file *filep)
{
	return msm_open_common(inode, filep, 1);
}
/*
 * open() for the "control" node: allocates a per-open
 * msm_control_device wrapper, performs the shared (non-exclusive,
 * once=0) open, then swaps file->private_data from the msm_device to
 * the wrapper and initializes the wrapper's control status queue.
 *
 * NOTE(review): corrupted extraction — the kmalloc NULL check, error
 * path and return are missing; code left byte-identical, comments
 * only.
 */
1878 static int msm_open_control(struct inode
*inode
, struct file
*filep
)
1882 struct msm_control_device
*ctrl_pmsm
=
1883 kmalloc(sizeof(struct msm_control_device
), GFP_KERNEL
);
/* Non-exclusive open (once=0): control may coexist with other nodes. */
1887 rc
= msm_open_common(inode
, filep
, 0);
/* msm_open_common stored the msm_device in private_data; keep it in
 * the wrapper and put the wrapper in private_data instead. */
1893 ctrl_pmsm
->pmsm
= filep
->private_data
;
1894 filep
->private_data
= ctrl_pmsm
;
1895 spin_lock_init(&ctrl_pmsm
->ctrl_q
.ctrl_status_q_lock
);
1896 INIT_LIST_HEAD(&ctrl_pmsm
->ctrl_q
.ctrl_status_q
);
1897 init_waitqueue_head(&ctrl_pmsm
->ctrl_q
.ctrl_status_wait
);
1899 CDBG("msm_open() open: rc = %d\n", rc
);
/*
 * V4L2 entry point exported via msm_v4l2_register(): wraps the
 * control command in a queue command, sends it through __msm_control
 * (note the upstream placeholder variable literally named FIXME for
 * the reply queue), and copies the result back into `out`.
 *
 * NOTE(review): corrupted extraction — error returns, frees and the
 * final return are missing; code left byte-identical, comments only.
 */
1903 static int __msm_v4l2_control(struct msm_sync
*sync
,
1904 struct msm_ctrl_cmd
*out
)
1908 struct msm_queue_cmd
*qcmd
= NULL
, *rcmd
= NULL
;
1909 struct msm_ctrl_cmd
*ctrl
;
/* Upstream placeholder: a reply queue literally named FIXME. */
1910 struct msm_control_device_queue FIXME
;
1912 /* wake up config thread, 4 is for V4L2 application */
1913 qcmd
= kmalloc(sizeof(struct msm_queue_cmd
), GFP_KERNEL
);
1915 pr_err("msm_control: cannot allocate buffer\n");
1919 qcmd
->type
= MSM_CAM_Q_V4L2_REQ
;
1920 qcmd
->command
= out
;
/* Blocking round-trip to the config thread. */
1922 rcmd
= __msm_control(sync
, &FIXME
, qcmd
, out
->timeout_ms
);
1928 ctrl
= (struct msm_ctrl_cmd
*)(rcmd
->command
);
1929 /* FIXME: we should just set out->length = ctrl->length; */
1930 BUG_ON(out
->length
< ctrl
->length
);
1931 memcpy(out
->value
, ctrl
->value
, ctrl
->length
);
1935 CDBG("__msm_v4l2_control: end rc = %d\n", rc
);
1939 static const struct file_operations msm_fops_config
= {
1940 .owner
= THIS_MODULE
,
1942 .unlocked_ioctl
= msm_ioctl_config
,
1943 .release
= msm_release_config
,
1944 .llseek
= no_llseek
,
1947 static const struct file_operations msm_fops_control
= {
1948 .owner
= THIS_MODULE
,
1949 .open
= msm_open_control
,
1950 .unlocked_ioctl
= msm_ioctl_control
,
1951 .release
= msm_release_control
,
1952 .llseek
= no_llseek
,
1955 static const struct file_operations msm_fops_frame
= {
1956 .owner
= THIS_MODULE
,
1958 .unlocked_ioctl
= msm_ioctl_frame
,
1959 .release
= msm_release_frame
,
1960 .poll
= msm_poll_frame
,
1961 .llseek
= no_llseek
,
/*
 * Create one char device node: device_create() a "<suffix><node>"
 * device in msm_class, then cdev_init/cdev_add it at `devno`. On
 * cdev_add failure the sysfs device is destroyed again.
 *
 * NOTE(review): corrupted extraction — the parameter lines for `node`,
 * `devno` and `suffix` (original lines 1965-1967), braces and returns
 * are missing; code left byte-identical, comments only.
 */
1964 static int msm_setup_cdev(struct msm_device
*msm
,
1968 const struct file_operations
*fops
)
1972 struct device
*device
=
1973 device_create(msm_class
, NULL
,
1975 "%s%d", suffix
, node
);
1977 if (IS_ERR(device
)) {
1978 rc
= PTR_ERR(device
);
1979 pr_err("msm_camera: error creating device: %d\n", rc
);
/* Register the char device behind the sysfs node. */
1983 cdev_init(&msm
->cdev
, fops
);
1984 msm
->cdev
.owner
= THIS_MODULE
;
1986 rc
= cdev_add(&msm
->cdev
, devno
, 1);
/* cdev_add failed: roll back the device_create above. */
1988 pr_err("msm_camera: error adding cdev: %d\n", rc
);
1989 device_destroy(msm_class
, devno
);
1996 static int msm_tear_down_cdev(struct msm_device
*msm
, dev_t devno
)
1998 cdev_del(&msm
->cdev
);
1999 device_destroy(msm_class
, devno
);
2003 int msm_v4l2_register(struct msm_v4l2_driver
*drv
)
2005 /* FIXME: support multiple sensors */
2006 if (list_empty(&msm_sensors
))
2009 drv
->sync
= list_first_entry(&msm_sensors
, struct msm_sync
, list
);
2010 drv
->open
= __msm_open
;
2011 drv
->release
= __msm_release
;
2012 drv
->ctrl
= __msm_v4l2_control
;
2013 drv
->reg_pmem
= __msm_register_pmem
;
2014 drv
->get_frame
= __msm_get_frame
;
2015 drv
->put_frame
= __msm_put_frame_buf
;
2016 drv
->get_pict
= __msm_get_pic
;
2017 drv
->drv_poll
= __msm_poll_frame
;
2021 EXPORT_SYMBOL(msm_v4l2_register
);
2023 int msm_v4l2_unregister(struct msm_v4l2_driver
*drv
)
2028 EXPORT_SYMBOL(msm_v4l2_unregister
);
/*
 * One-time setup of a msm_sync instance: pulls the sensor platform
 * data from the platform device, initializes the three event queues
 * (msg_event, prev_frame, pict_frame: lock + list + waitqueue each),
 * probes the sensor with camera IO powered on, and stores the
 * resulting sensor control ops.
 *
 * NOTE(review): corrupted extraction — rc declaration, error checks
 * after probe_on/sensor_probe and returns are missing; code left
 * byte-identical, comments only.
 */
2030 static int msm_sync_init(struct msm_sync
*sync
,
2031 struct platform_device
*pdev
,
2032 int (*sensor_probe
)(const struct msm_camera_sensor_info
*,
2033 struct msm_sensor_ctrl
*))
2036 struct msm_sensor_ctrl sctrl
;
2037 sync
->sdata
= pdev
->dev
.platform_data
;
/* Config-thread event queue. */
2039 spin_lock_init(&sync
->msg_event_q_lock
);
2040 INIT_LIST_HEAD(&sync
->msg_event_q
);
2041 init_waitqueue_head(&sync
->msg_event_wait
);
/* Preview frame queue. */
2043 spin_lock_init(&sync
->prev_frame_q_lock
);
2044 INIT_LIST_HEAD(&sync
->prev_frame_q
);
2045 init_waitqueue_head(&sync
->prev_frame_wait
);
/* Picture (snapshot) frame queue. */
2047 spin_lock_init(&sync
->pict_frame_q_lock
);
2048 INIT_LIST_HEAD(&sync
->pict_frame_q
);
2049 init_waitqueue_head(&sync
->pict_frame_wait
);
/* Power camera IO on just for the duration of the sensor probe. */
2051 rc
= msm_camio_probe_on(pdev
);
2054 rc
= sensor_probe(sync
->sdata
, &sctrl
);
2057 sync
->sctrl
= sctrl
;
2059 msm_camio_probe_off(pdev
);
2061 pr_err("msm_camera: failed to initialize %s\n",
2062 sync
->sdata
->sensor_name
);
2067 mutex_init(&sync
->lock
);
2068 CDBG("initialized %s\n", sync
->sdata
->sensor_name
);
/*
 * Counterpart of msm_sync_init().
 * NOTE(review): the body (original lines 2073-2076) was dropped by a
 * corrupted extraction; restored as a trivial success return — confirm
 * against the original tree.
 */
static int msm_sync_destroy(struct msm_sync *sync)
{
	return 0;
}
2077 static int msm_device_init(struct msm_device
*pmsm
,
2078 struct msm_sync
*sync
,
2081 int dev_num
= 3 * node
;
2082 int rc
= msm_setup_cdev(pmsm
, node
,
2083 MKDEV(MAJOR(msm_devno
), dev_num
),
2084 "control", &msm_fops_control
);
2086 pr_err("error creating control node: %d\n", rc
);
2090 rc
= msm_setup_cdev(pmsm
+ 1, node
,
2091 MKDEV(MAJOR(msm_devno
), dev_num
+ 1),
2092 "config", &msm_fops_config
);
2094 pr_err("error creating config node: %d\n", rc
);
2095 msm_tear_down_cdev(pmsm
, MKDEV(MAJOR(msm_devno
),
2100 rc
= msm_setup_cdev(pmsm
+ 2, node
,
2101 MKDEV(MAJOR(msm_devno
), dev_num
+ 2),
2102 "frame", &msm_fops_frame
);
2104 pr_err("error creating frame node: %d\n", rc
);
2105 msm_tear_down_cdev(pmsm
,
2106 MKDEV(MAJOR(msm_devno
), dev_num
));
2107 msm_tear_down_cdev(pmsm
+ 1,
2108 MKDEV(MAJOR(msm_devno
), dev_num
+ 1));
2112 atomic_set(&pmsm
[0].opened
, 0);
2113 atomic_set(&pmsm
[1].opened
, 0);
2114 atomic_set(&pmsm
[2].opened
, 0);
2116 pmsm
[0].sync
= sync
;
2117 pmsm
[1].sync
= sync
;
2118 pmsm
[2].sync
= sync
;
/*
 * Platform-driver entry point called once per probed sensor: on the
 * first call allocates the chrdev region (3 minors per sensor, up to
 * MSM_MAX_CAMERA_SENSORS) and the "msm_camera" class; then allocates
 * three msm_device slots plus one msm_sync in a single kzalloc,
 * initializes the sync, creates the device nodes, and links the sync
 * into the global msm_sensors list.
 *
 * NOTE(review): corrupted extraction — error-label chain, returns,
 * camera_node increment and several braces are missing; code left
 * byte-identical, comments only. The GFP_ATOMIC here matches the
 * file-head FIXME "most allocations need not be GFP_ATOMIC".
 */
2123 int msm_camera_drv_start(struct platform_device
*dev
,
2124 int (*sensor_probe
)(const struct msm_camera_sensor_info
*,
2125 struct msm_sensor_ctrl
*))
2127 struct msm_device
*pmsm
= NULL
;
2128 struct msm_sync
*sync
;
/* Persists across calls: index of the next sensor to register. */
2130 static int camera_node
;
2132 if (camera_node
>= MSM_MAX_CAMERA_SENSORS
) {
2133 pr_err("msm_camera: too many camera sensors\n");
2138 /* There are three device nodes per sensor */
2139 rc
= alloc_chrdev_region(&msm_devno
, 0,
2140 3 * MSM_MAX_CAMERA_SENSORS
,
2143 pr_err("msm_camera: failed to allocate chrdev: %d\n",
2148 msm_class
= class_create(THIS_MODULE
, "msm_camera");
2149 if (IS_ERR(msm_class
)) {
2150 rc
= PTR_ERR(msm_class
);
2151 pr_err("msm_camera: create device class failed: %d\n",
/* Single allocation: pmsm[0..2] followed by the msm_sync. */
2157 pmsm
= kzalloc(sizeof(struct msm_device
) * 3 +
2158 sizeof(struct msm_sync
), GFP_ATOMIC
);
2161 sync
= (struct msm_sync
*)(pmsm
+ 3);
2163 rc
= msm_sync_init(sync
, dev
, sensor_probe
);
2169 CDBG("setting camera node %d\n", camera_node
);
2170 rc
= msm_device_init(pmsm
, sync
, camera_node
);
/* Node creation failed: undo the sync init. */
2172 msm_sync_destroy(sync
);
2178 list_add(&sync
->list
, &msm_sensors
);
2181 EXPORT_SYMBOL(msm_camera_drv_start
);