/*
 * drivers/media/common/saa7146_hlp.c
 *
 * Helper functions (scaler / clip-format register calculations) for
 * saa7146-based V4L/DVB drivers.
 */
1#include <linux/kernel.h>
2#include <media/saa7146_vv.h>
3
4static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
5{
6 /* clear out the necessary bits */
7 *clip_format &= 0x0000ffff;
8 /* set these bits new */
9 *clip_format |= (( ((palette&0xf00)>>8) << 30) | ((palette&0x00f) << 24) | (((palette&0x0f0)>>4) << 16));
10}
11
12static void calculate_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync, u32* hps_ctrl)
13{
14 *hps_ctrl &= ~(MASK_30 | MASK_31 | MASK_28);
15 *hps_ctrl |= (source << 30) | (sync << 28);
16}
17
18static void calculate_hxo_and_hyo(struct saa7146_vv *vv, u32* hps_h_scale, u32* hps_ctrl)
19{
20 int hyo = 0, hxo = 0;
21
22 hyo = vv->standard->v_offset;
23 hxo = vv->standard->h_offset;
24
25 *hps_h_scale &= ~(MASK_B0 | 0xf00);
26 *hps_h_scale |= (hxo << 0);
27
28 *hps_ctrl &= ~(MASK_W0 | MASK_B2);
29 *hps_ctrl |= (hyo << 12);
30}
31
/* Helper functions for calculating the horizontal and vertical scaling
   registers, the clip-format register, etc.  Each function takes
   pointers to the current (most likely read-back) register values and
   modifies them in place according to the requested changes.
*/
38
/* hps_coeff used for CXY and CXUV; scale 1/1 -> scale 1/64 */
/* Indexed by (prescale value - 1), clamped to 63 by the callers.
   hps_coeff is the FIR coefficient pattern written to CXY/CXUV;
   weight_sum is matched against h_attenuation[] below to pick dcgx. */
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_h_coeff_tab [] = {
	{0x00, 2}, {0x02, 4}, {0x00, 4}, {0x06, 8}, {0x02, 8},
	{0x08, 8}, {0x00, 8}, {0x1E, 16}, {0x0E, 8}, {0x26, 8},
	{0x06, 8}, {0x42, 8}, {0x02, 8}, {0x80, 8}, {0x00, 8},
	{0xFE, 16}, {0xFE, 8}, {0x7E, 8}, {0x7E, 8}, {0x3E, 8},
	{0x3E, 8}, {0x1E, 8}, {0x1E, 8}, {0x0E, 8}, {0x0E, 8},
	{0x06, 8}, {0x06, 8}, {0x02, 8}, {0x02, 8}, {0x00, 8},
	{0x00, 8}, {0xFE, 16}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0x7E, 8},
	{0x7E, 8}, {0x3E, 8}, {0x3E, 8}, {0x1E, 8}, {0x1E, 8},
	{0x0E, 8}, {0x0E, 8}, {0x06, 8}, {0x06, 8}, {0x02, 8},
	{0x02, 8}, {0x00, 8}, {0x00, 8}, {0xFE, 16}
};
58
/* table of attenuation values for horizontal scaling */
/* zero-terminated; the index of the first entry >= the table's
   weight_sum becomes the dcgx register field */
static u8 h_attenuation[] = { 1, 2, 4, 8, 2, 4, 8, 16, 0};
61
/* calculate horizontal scale registers */
/*
 * Program the horizontal prescaler and fine scaler so that a line of
 * in_x input pixels is scaled to out_x output pixels, optionally
 * mirrored left/right (flip_lr != 0).  The four register values are
 * read-modify-written through the pointer arguments.
 *
 * Returns 0 on success, -EINVAL if out_x is 0.
 */
static int calculate_h_scale_registers(struct saa7146_dev *dev,
	int in_x, int out_x, int flip_lr,
	u32* hps_ctrl, u32* hps_v_gain, u32* hps_h_prescale, u32* hps_h_scale)
{
	/* horizontal prescaler */
	u32 dcgx = 0, xpsc = 0, xacm = 0, cxy = 0, cxuv = 0;
	/* horizontal scaler */
	u32 xim = 0, xp = 0, xsci =0;
	/* vertical scale & gain */
	u32 pfuv = 0;

	/* helper variables */
	u32 h_atten = 0, i = 0;

	if ( 0 == out_x ) {
		return -EINVAL;
	}

	/* mask out vanity-bit (left/right flip) */
	*hps_ctrl &= ~MASK_29;

	/* calculate prescale-(xspc)-value:	[n   .. 1/2) : 1
						[1/2 .. 1/3) : 2
						[1/3 .. 1/4) : 3
						... */
	if (in_x > out_x) {
		xpsc = in_x / out_x;
	}
	else {
		/* zooming */
		xpsc = 1;
	}

	/* if flip_lr-bit is set, number of pixels after
	   horizontal prescaling must be < 384 */
	if ( 0 != flip_lr ) {

		/* set vanity bit */
		*hps_ctrl |= MASK_29;

		while (in_x / xpsc >= 384 )
			xpsc++;
	}
	/* if zooming is wanted, number of pixels after
	   horizontal prescaling must be < 768 */
	else {
		while ( in_x / xpsc >= 768 )
			xpsc++;
	}

	/* maximum prescale is 64 (p.69) */
	if ( xpsc > 64 )
		xpsc = 64;

	/* keep xacm clear*/
	xacm = 0;

	/* set horizontal filter parameters (CXY = CXUV),
	   clamping the table index to the last entry */
	cxy = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].hps_coeff;
	cxuv = cxy;

	/* calculate and set horizontal fine scale (xsci) */

	/* bypass the horizontal scaler ? */
	if ( (in_x == out_x) && ( 1 == xpsc ) )
		xsci = 0x400;
	else
		xsci = ( (1024 * in_x) / (out_x * xpsc) ) + xpsc;

	/* set start phase for horizontal fine scale (xp) to 0 */
	xp = 0;

	/* set xim, if we bypass the horizontal scaler */
	if ( 0x400 == xsci )
		xim = 1;
	else
		xim = 0;

	/* if the prescaler is bypassed, enable horizontal
	   accumulation mode (xacm) and clear dcgx */
	if( 1 == xpsc ) {
		xacm = 1;
		dcgx = 0;
	} else {
		xacm = 0;
		/* get best match in the table of attenuations
		   for horizontal scaling: dcgx is the index of the
		   first attenuation entry >= the coefficient's weight */
		h_atten = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].weight_sum;

		for (i = 0; h_attenuation[i] != 0; i++) {
			if (h_attenuation[i] >= h_atten)
				break;
		}

		dcgx = i;
	}

	/* the horizontal scaling increment controls the UV filter
	   to reduce the bandwidth to improve the display quality,
	   so set it ... */
	if ( xsci == 0x400)
		pfuv = 0x00;
	else if ( xsci < 0x600)
		pfuv = 0x01;
	else if ( xsci < 0x680)
		pfuv = 0x11;
	else if ( xsci < 0x700)
		pfuv = 0x22;
	else
		pfuv = 0x33;


	*hps_v_gain &= MASK_W0|MASK_B2;
	*hps_v_gain |= (pfuv << 24);

	*hps_h_scale &= ~(MASK_W1 | 0xf000);
	*hps_h_scale |= (xim << 31) | (xp << 24) | (xsci << 12);

	/* NOTE(review): unlike the registers above, the old contents of
	   hps_h_prescale are not masked out before OR-ing — confirm the
	   caller always passes a cleared value */
	*hps_h_prescale |= (dcgx << 27) | ((xpsc-1) << 18) | (xacm << 17) | (cxy << 8) | (cxuv << 0);

	return 0;
}
185
/* vertical filter coefficients; indexed by the sequence length
   interval (yacl), clamped to 63 by the callers.  hps_coeff is the
   CYA/CYB coefficient pair; weight_sum is matched against
   v_attenuation[] below to pick dcgy. */
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_v_coeff_tab [] = {
	{0x0100, 2}, {0x0102, 4}, {0x0300, 4}, {0x0106, 8}, {0x0502, 8},
	{0x0708, 8}, {0x0F00, 8}, {0x011E, 16}, {0x110E, 16}, {0x1926, 16},
	{0x3906, 16}, {0x3D42, 16}, {0x7D02, 16}, {0x7F80, 16}, {0xFF00, 16},
	{0x01FE, 32}, {0x01FE, 32}, {0x817E, 32}, {0x817E, 32}, {0xC13E, 32},
	{0xC13E, 32}, {0xE11E, 32}, {0xE11E, 32}, {0xF10E, 32}, {0xF10E, 32},
	{0xF906, 32}, {0xF906, 32}, {0xFD02, 32}, {0xFD02, 32}, {0xFF00, 32},
	{0xFF00, 32}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x817E, 64},
	{0x817E, 64}, {0xC13E, 64}, {0xC13E, 64}, {0xE11E, 64}, {0xE11E, 64},
	{0xF10E, 64}, {0xF10E, 64}, {0xF906, 64}, {0xF906, 64}, {0xFD02, 64},
	{0xFD02, 64}, {0xFF00, 64}, {0xFF00, 64}, {0x01FE, 128}
};
204
/* table of attenuation values for vertical scaling */
/* zero-terminated; the index of the first entry >= the table's
   weight_sum becomes the dcgy register field */
static u16 v_attenuation[] = { 2, 4, 8, 16, 32, 64, 128, 256, 0};
207
/* calculate vertical scale registers */
/*
 * Program the vertical scaler so that in_y input lines are scaled down
 * to out_y output lines for the given v4l2 field mode.  The two
 * register values are read-modify-written through the pointer
 * arguments.
 *
 * Returns 0 on success, -EINVAL if vertical zooming (out_y > in_y)
 * was requested, which the hardware cannot do.
 */
static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field field,
	int in_y, int out_y, u32* hps_v_scale, u32* hps_v_gain)
{
	int lpi = 0;

	/* vertical scaling */
	u32 yacm = 0, ysci = 0, yacl = 0, ypo = 0, ype = 0;
	/* vertical scale & gain */
	u32 dcgy = 0, cya_cyb = 0;

	/* helper variables */
	u32 v_atten = 0, i = 0;

	/* error, if vertical zooming */
	if ( in_y < out_y ) {
		return -EINVAL;
	}

	/* linear phase interpolation may be used
	   if scaling is between 1 and 1/2 (both fields used)
	   or scaling is between 1/2 and 1/4 (if only one field is used) */

	if (V4L2_FIELD_HAS_BOTH(field)) {
		if( 2*out_y >= in_y) {
			lpi = 1;
		}
	} else if (field == V4L2_FIELD_TOP
		|| field == V4L2_FIELD_ALTERNATE
		|| field == V4L2_FIELD_BOTTOM) {
		if( 4*out_y >= in_y ) {
			lpi = 1;
		}
		/* single-field mode delivers half the lines, so double
		   out_y to keep the scaling ratio correct */
		out_y *= 2;
	}
	if( 0 != lpi ) {
		/* linear phase interpolation mode */
		yacm = 0;
		yacl = 0;
		cya_cyb = 0x00ff;

		/* calculate scaling increment */
		if ( in_y > out_y )
			ysci = ((1024 * in_y) / (out_y + 1)) - 1024;
		else
			ysci = 0;

		dcgy = 0;

		/* calculate ype and ypo */
		ype = ysci / 16;
		ypo = ype + (ysci / 64);

	} else {
		/* vertical accumulation mode */
		yacm = 1;

		/* calculate scaling increment (rounded up, in 1/1024 units) */
		ysci = (((10 * 1024 * (in_y - out_y - 1)) / in_y) + 9) / 10;

		/* calculate ype and ypo */
		ypo = ype = ((ysci + 15) / 16);

		/* the sequence length interval (yacl) has to be set according
		   to the prescale value, e.g.	[n   .. 1/2) : 0
						[1/2 .. 1/3) : 1
						[1/3 .. 1/4) : 2
						... */
		if ( ysci < 512) {
			yacl = 0;
		} else {
			yacl = ( ysci / (1024 - ysci) );
		}

		/* get filter coefficients for cya, cyb from table hps_v_coeff_tab,
		   clamping the index to the last entry */
		cya_cyb = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].hps_coeff;

		/* get best match in the table of attenuations for vertical scaling */
		v_atten = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].weight_sum;

		for (i = 0; v_attenuation[i] != 0; i++) {
			if (v_attenuation[i] >= v_atten)
				break;
		}

		dcgy = i;
	}

	/* ypo and ype swapped in spec ? */
	*hps_v_scale |= (yacm << 31) | (ysci << 21) | (yacl << 15) | (ypo << 8 ) | (ype << 1);

	*hps_v_gain &= ~(MASK_W0|MASK_B2);
	*hps_v_gain |= (dcgy << 16) | (cya_cyb << 0);

	return 0;
}
303
304/* simple bubble-sort algorithm with duplicate elimination */
305static int sort_and_eliminate(u32* values, int* count)
306{
307 int low = 0, high = 0, top = 0, temp = 0;
308 int cur = 0, next = 0;
309
310 /* sanity checks */
311 if( (0 > *count) || (NULL == values) ) {
312 return -EINVAL;
313 }
314
315