/*
 * isphist.c
 *
 * TI OMAP3 ISP - Histogram module
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: David Cohen <dacohen@gmail.com>
 *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "isp.h"
#include "ispreg.h"
#include "isphist.h"

#define HIST_CONFIG_DMA	1

/*
 * hist_reset_mem - Clear the histogram memory before starting the stats engine.
 */
static void hist_reset_mem(struct ispstat *hist)
{
	struct isp_device *isp = hist->isp;
	struct omap3isp_hist_config *conf = hist->priv;
	unsigned int i;

	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);

	/*
	 * While this bit is set, the histogram internal buffer is cleared at
	 * the same time it is read. The bit must be cleared afterwards.
	 */
	isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

	/*
	 * Clear four words per iteration as an optimization: it avoids 3/4 of
	 * the loop jumps. OMAP3ISP_HIST_MEM_SIZE is known to be divisible by 4.
	 */
	for (i = OMAP3ISP_HIST_MEM_SIZE / 4; i > 0; i--) {
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
	}
	isp_reg_clr(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

	hist->wait_acc_frames = conf->num_acc_frames;
}

/*
 * hist_setup_regs - Helper function to update the histogram registers.
 */
static void hist_setup_regs(struct ispstat *hist, void *priv)
{
	struct isp_device *isp = hist->isp;
	struct omap3isp_hist_config *conf = priv;
	int c;
	u32 cnt;
	u32 wb_gain;
	u32 reg_hor[OMAP3ISP_HIST_MAX_REGIONS];
	u32 reg_ver[OMAP3ISP_HIST_MAX_REGIONS];

	if (!hist->update || hist->state == ISPSTAT_DISABLED ||
	    hist->state == ISPSTAT_DISABLING)
		return;

	cnt = conf->cfa << ISPHIST_CNT_CFA_SHIFT;

	wb_gain = conf->wg[0] << ISPHIST_WB_GAIN_WG00_SHIFT;
	wb_gain |= conf->wg[1] << ISPHIST_WB_GAIN_WG01_SHIFT;
	wb_gain |= conf->wg[2] << ISPHIST_WB_GAIN_WG02_SHIFT;
	if (conf->cfa == OMAP3ISP_HIST_CFA_BAYER)
		wb_gain |= conf->wg[3] << ISPHIST_WB_GAIN_WG03_SHIFT;

	/* Regions size and position */
	for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) {
		if (c < conf->num_regions) {
			reg_hor[c] = (conf->region[c].h_start <<
				      ISPHIST_REG_START_SHIFT)
				   | (conf->region[c].h_end <<
				      ISPHIST_REG_END_SHIFT);
			reg_ver[c] = (conf->region[c].v_start <<
				      ISPHIST_REG_START_SHIFT)
				   | (conf->region[c].v_end <<
				      ISPHIST_REG_END_SHIFT);
		} else {
			reg_hor[c] = 0;
			reg_ver[c] = 0;
		}
	}

	cnt |= conf->hist_bins << ISPHIST_CNT_BINS_SHIFT;
	switch (conf->hist_bins) {
	case OMAP3ISP_HIST_BINS_256:
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 8) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	case OMAP3ISP_HIST_BINS_128:
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 7) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	case OMAP3ISP_HIST_BINS_64:
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 6) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	default: /* OMAP3ISP_HIST_BINS_32 */
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 5) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	}

	hist_reset_mem(hist);

	isp_reg_writel(isp, cnt, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT);
	isp_reg_writel(isp, wb_gain, OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN);
	isp_reg_writel(isp, reg_hor[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ);
	isp_reg_writel(isp, reg_ver[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT);
	isp_reg_writel(isp, reg_hor[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ);
	isp_reg_writel(isp, reg_ver[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT);
	isp_reg_writel(isp, reg_hor[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ);
	isp_reg_writel(isp, reg_ver[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT);
	isp_reg_writel(isp, reg_hor[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ);
	isp_reg_writel(isp, reg_ver[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT);

	hist->update = 0;
	hist->config_counter += hist->inc_config;
	hist->inc_config = 0;
	hist->buf_size = conf->buf_size;
}

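/*
 * hist_enable - Enable/disable the histogram module and its ISP subclock.
 */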
static void hist_enable(struct ispstat *hist, int enable)
{
	if (enable) {
		isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
			    ISPHIST_PCR_ENABLE);
		omap3isp_subclk_enable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
	} else {
		isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
			    ISPHIST_PCR_ENABLE);
		omap3isp_subclk_disable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
	}
}

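/*
 * hist_busy - Check whether the histogram module is busy.
 */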
static int hist_busy(struct ispstat *hist)
{
	return isp_reg_readl(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR)
						& ISPHIST_PCR_BUSY;
}

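/*
 * hist_dma_cb - DMA engine callback invoked when the histogram read-out
 *		 transfer completes.
 */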
static void hist_dma_cb(void *data)
{
	struct ispstat *hist = data;

	/* FIXME: The DMA engine API can't report transfer errors :-/ */

	isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);

	omap3isp_stat_dma_isr(hist);
	if (hist->state != ISPSTAT_DISABLED)
		omap3isp_hist_dma_done(hist->isp);
}

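/*
 * hist_buf_dma - Read the histogram memory into the active buffer using DMA.
 */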
static int hist_buf_dma(struct ispstat *hist)
{
	dma_addr_t dma_addr = hist->active_buf->dma_addr;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int ret;

	if (unlikely(!dma_addr)) {
		dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
		goto error;
	}

	isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
	isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);
	omap3isp_flush(hist->isp);

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = hist->isp->mmio_hist_base_phys + ISPHIST_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = hist->buf_size / 4;

	ret = dmaengine_slave_config(hist->dma_ch, &cfg);
	if (ret < 0) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave configuration failed\n");
		goto error;
	}

	tx = dmaengine_prep_slave_single(hist->dma_ch, dma_addr,
					 hist->buf_size, DMA_DEV_TO_MEM,
					 DMA_CTRL_ACK);
	if (tx == NULL) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave preparation failed\n");
		goto error;
	}

	tx->callback = hist_dma_cb;
	tx->callback_param = hist;
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_dbg(hist->isp->dev, "hist: DMA submission failed\n");
		goto error;
	}

	dma_async_issue_pending(hist->dma_ch);

	return STAT_BUF_WAITING_DMA;

error:
	hist_reset_mem(hist);
	return STAT_NO_BUF;
}

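/*
 * hist_buf_pio - Read the histogram memory into the active buffer using PIO.
 */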
static int hist_buf_pio(struct ispstat *hist)
{
	struct isp_device *isp = hist->isp;
	u32 *buf = hist->active_buf->virt_addr;
	unsigned int i;

	if (!buf) {
		dev_dbg(isp->dev, "hist: invalid PIO buffer address\n");
		hist_reset_mem(hist);
		return STAT_NO_BUF;
	}

	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);

	/*
	 * While this bit is set, the histogram internal buffer is cleared at
	 * the same time it is read. The bit must be cleared just after all
	 * the data has been acquired.
	 */
	isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

	/*
	 * Read four 4-byte words per iteration as an optimization: it avoids
	 * 3/4 of the loop jumps. buf_size is known to be divisible by 16.
	 */
	for (i = hist->buf_size / 16; i > 0; i--) {
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
	}
	isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);

	return STAT_BUF_DONE;
}

/*
 * hist_buf_process - Callback from the ISP driver for the HIST interrupt.
 */
static int hist_buf_process(struct ispstat *hist)
{
	struct omap3isp_hist_config *user_cfg = hist->priv;
	int ret;

	if (atomic_read(&hist->buf_err) || hist->state != ISPSTAT_ENABLED) {
		hist_reset_mem(hist);
		return STAT_NO_BUF;
	}

	if (--(hist->wait_acc_frames))
		return STAT_NO_BUF;

	if (hist->dma_ch)
		ret = hist_buf_dma(hist);
	else
		ret = hist_buf_pio(hist);

	hist->wait_acc_frames = user_cfg->num_acc_frames;

	return ret;
}

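/*
 * hist_get_buf_size - Compute the buffer size required by a configuration.
 */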
static u32 hist_get_buf_size(struct omap3isp_hist_config *conf)
{
	return OMAP3ISP_HIST_MEM_SIZE_BINS(conf->hist_bins) * conf->num_regions;
}

/*
 * hist_validate_params - Helper function to check the user-given parameters.
 * @new_conf: Pointer to the user configuration structure.
 *
 * Returns 0 on successful configuration.
 */
static int hist_validate_params(struct ispstat *hist, void *new_conf)
{
	struct omap3isp_hist_config *user_cfg = new_conf;
	int c;
	u32 buf_size;

	if (user_cfg->cfa > OMAP3ISP_HIST_CFA_FOVEONX3)
		return -EINVAL;

	/* Regions size and position */

	if ((user_cfg->num_regions < OMAP3ISP_HIST_MIN_REGIONS) ||
	    (user_cfg->num_regions > OMAP3ISP_HIST_MAX_REGIONS))
		return -EINVAL;

	/* Regions */
	for (c = 0; c < user_cfg->num_regions; c++) {
		if (user_cfg->region[c].h_start & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].h_end & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].v_start & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].v_end & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].h_start > user_cfg->region[c].h_end)
			return -EINVAL;
		if (user_cfg->region[c].v_start > user_cfg->region[c].v_end)
			return -EINVAL;
	}

	switch (user_cfg->num_regions) {
	case 1:
		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_256)
			return -EINVAL;
		break;
	case 2:
		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_128)
			return -EINVAL;
		break;
	default: /* 3 or 4 */
		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_64)
			return -EINVAL;
		break;
	}

	buf_size = hist_get_buf_size(user_cfg);
	if (buf_size > user_cfg->buf_size)
		/* The user's buf_size request wasn't big enough */
		user_cfg->buf_size = buf_size;
	else if (user_cfg->buf_size > OMAP3ISP_HIST_MAX_BUF_SIZE)
		user_cfg->buf_size = OMAP3ISP_HIST_MAX_BUF_SIZE;

	return 0;
}

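/*
 * hist_comp_params - Compare a new configuration against the current one.
 *
 * Returns 1 if the configurations differ and an update is needed, 0 otherwise.
 */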
static int hist_comp_params(struct ispstat *hist,
			    struct omap3isp_hist_config *user_cfg)
{
	struct omap3isp_hist_config *cur_cfg = hist->priv;
	int c;

	if (cur_cfg->cfa != user_cfg->cfa)
		return 1;

	if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames)
		return 1;

	if (cur_cfg->hist_bins != user_cfg->hist_bins)
		return 1;

	for (c = 0; c < OMAP3ISP_HIST_MAX_WG; c++) {
		if (c == 3 && user_cfg->cfa == OMAP3ISP_HIST_CFA_FOVEONX3)
			break;
		else if (cur_cfg->wg[c] != user_cfg->wg[c])
			return 1;
	}

	if (cur_cfg->num_regions != user_cfg->num_regions)
		return 1;

	/* Regions */
	for (c = 0; c < user_cfg->num_regions; c++) {
		if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start)
			return 1;
		if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end)
			return 1;
		if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start)
			return 1;
		if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end)
			return 1;
	}

	return 0;
}

/*
 * hist_set_params - Helper function to check and store the user-given params.
 * @new_conf: Pointer to the user configuration structure.
 */
static void hist_set_params(struct ispstat *hist, void *new_conf)
{
	struct omap3isp_hist_config *user_cfg = new_conf;
	struct omap3isp_hist_config *cur_cfg = hist->priv;

	if (!hist->configured || hist_comp_params(hist, user_cfg)) {
		memcpy(cur_cfg, user_cfg, sizeof(*user_cfg));
		if (user_cfg->num_acc_frames == 0)
			user_cfg->num_acc_frames = 1;
		hist->inc_config++;
		hist->update = 1;
		/*
		 * The user might have asked for a bigger buffer than necessary
		 * for this configuration. In order to return the right amount
		 * of data during a buffer request, calculate the size here
		 * instead of sticking with user_cfg->buf_size.
		 */
		cur_cfg->buf_size = hist_get_buf_size(cur_cfg);
	}
}

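/*
 * hist_ioctl - Handle the histogram module private ioctls.
 */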
static long hist_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct ispstat *stat = v4l2_get_subdevdata(sd);

	switch (cmd) {
	case VIDIOC_OMAP3ISP_HIST_CFG:
		return omap3isp_stat_config(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_REQ:
		return omap3isp_stat_request_statistics(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_EN: {
		int *en = arg;
		return omap3isp_stat_enable(stat, !!*en);
	}
	}

	return -ENOIOCTLCMD;
}

static const struct ispstat_ops hist_ops = {
	.validate_params = hist_validate_params,
	.set_params = hist_set_params,
	.setup_regs = hist_setup_regs,
	.enable = hist_enable,
	.busy = hist_busy,
	.buf_process = hist_buf_process,
};

static const struct v4l2_subdev_core_ops hist_subdev_core_ops = {
	.ioctl = hist_ioctl,
	.subscribe_event = omap3isp_stat_subscribe_event,
	.unsubscribe_event = omap3isp_stat_unsubscribe_event,
};

static const struct v4l2_subdev_video_ops hist_subdev_video_ops = {
	.s_stream = omap3isp_stat_s_stream,
};

static const struct v4l2_subdev_ops hist_subdev_ops = {
	.core = &hist_subdev_core_ops,
	.video = &hist_subdev_video_ops,
};

/*
 * omap3isp_hist_init - Module initialization.
 */
int omap3isp_hist_init(struct isp_device *isp)
{
	struct ispstat *hist = &isp->isp_hist;
	struct omap3isp_hist_config *hist_cfg;
	int ret = -1;

	hist_cfg = devm_kzalloc(isp->dev, sizeof(*hist_cfg), GFP_KERNEL);
	if (hist_cfg == NULL)
		return -ENOMEM;

	hist->isp = isp;

	if (HIST_CONFIG_DMA) {
		dma_cap_mask_t mask;

		/*
		 * We need a slave-capable channel without a DMA request line
		 * for reading out the data. For this we can use
		 * dma_request_chan_by_mask(), as we are happy with any channel
		 * as long as it is capable of slave configuration.
		 */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		hist->dma_ch = dma_request_chan_by_mask(&mask);
		if (IS_ERR(hist->dma_ch)) {
			ret = PTR_ERR(hist->dma_ch);
			if (ret == -EPROBE_DEFER)
				return ret;

			hist->dma_ch = NULL;
			dev_warn(isp->dev,
				 "hist: DMA channel request failed, using PIO\n");
		} else {
			dev_dbg(isp->dev, "hist: using DMA channel %s\n",
				dma_chan_name(hist->dma_ch));
		}
	}

	hist->ops = &hist_ops;
	hist->priv = hist_cfg;
	hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;

	ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
	if (ret) {
		if (hist->dma_ch)
			dma_release_channel(hist->dma_ch);
	}

	return ret;
}

/*
 * omap3isp_hist_cleanup - Module cleanup.
 */
void omap3isp_hist_cleanup(struct isp_device *isp)
{
	struct ispstat *hist = &isp->isp_hist;

	if (hist->dma_ch)
		dma_release_channel(hist->dma_ch);

	omap3isp_stat_cleanup(hist);
}