#ifndef _I915_FIXED_H_
#define _I915_FIXED_H_
typedef struct {
- uint32_t val;
+ u32 val;
} uint_fixed_16_16_t;
#define FP_16_16_MAX ({ \
	uint_fixed_16_16_t fp; \
	fp.val = UINT_MAX; \
	fp; \
})

static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
{
	if (val.val == 0)
		return true;
	return false;
}
-static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
+static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
{
	uint_fixed_16_16_t fp;

	WARN_ON(val > U16_MAX);
	fp.val = val << 16;
	return fp;
}
-static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
+static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
return DIV_ROUND_UP(fp.val, 1 << 16);
}
-static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
+static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
{
return fp.val >> 16;
}
static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
					     uint_fixed_16_16_t max2)
{
	uint_fixed_16_16_t max;

	max.val = max(max1.val, max2.val);
	return max;
}
-static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
{
uint_fixed_16_16_t fp;
WARN_ON(val > U32_MAX);
- fp.val = (uint32_t) val;
+ fp.val = (u32)val;
return fp;
}
-static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
- uint_fixed_16_16_t d)
+static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
+ uint_fixed_16_16_t d)
{
return DIV_ROUND_UP(val.val, d.val);
}
-static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t mul)
+static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
- uint64_t intermediate_val;
+ u64 intermediate_val;
- intermediate_val = (uint64_t) val * mul.val;
+ intermediate_val = (u64)val * mul.val;
intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
WARN_ON(intermediate_val > U32_MAX);
- return (uint32_t) intermediate_val;
+ return (u32)intermediate_val;
}
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t mul)
{
- uint64_t intermediate_val;
+ u64 intermediate_val;
- intermediate_val = (uint64_t) val.val * mul.val;
+ intermediate_val = (u64)val.val * mul.val;
intermediate_val = intermediate_val >> 16;
return clamp_u64_to_fixed16(intermediate_val);
}
-static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
+static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
{
- uint64_t interm_val;
+ u64 interm_val;
- interm_val = (uint64_t)val << 16;
+ interm_val = (u64)val << 16;
interm_val = DIV_ROUND_UP_ULL(interm_val, d);
return clamp_u64_to_fixed16(interm_val);
}
-static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t d)
+static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
{
- uint64_t interm_val;
+ u64 interm_val;
- interm_val = (uint64_t)val << 16;
+ interm_val = (u64)val << 16;
interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
WARN_ON(interm_val > U32_MAX);
- return (uint32_t) interm_val;
+ return (u32)interm_val;
}
-static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t mul)
+static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
{
- uint64_t intermediate_val;
+ u64 intermediate_val;
- intermediate_val = (uint64_t) val * mul.val;
+ intermediate_val = (u64)val * mul.val;
return clamp_u64_to_fixed16(intermediate_val);
}
static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
uint_fixed_16_16_t add2)
{
- uint64_t interm_sum;
+ u64 interm_sum;
- interm_sum = (uint64_t) add1.val + add2.val;
+ interm_sum = (u64)add1.val + add2.val;
return clamp_u64_to_fixed16(interm_sum);
}
static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
- uint32_t add2)
+ u32 add2)
{
- uint64_t interm_sum;
+ u64 interm_sum;
uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
- interm_sum = (uint64_t) add1.val + interm_add2.val;
+ interm_sum = (u64)add1.val + interm_add2.val;
return clamp_u64_to_fixed16(interm_sum);
}
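
For illustration only, and not part of the patch: a minimal, self-contained userspace sketch of the 16.16 scheme these helpers implement, where the upper 16 bits carry the integer part and the lower 16 bits the fraction. The *_sketch names and DIV_ROUND_UP_SKETCH are stand-ins invented here, not the kernel's DIV_ROUND_UP()/WARN_ON() infrastructure.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP_SKETCH(n, d) (((n) + (d) - 1) / (d))

typedef struct {
	uint32_t val;
} fixed_16_16_sketch_t;

/* val / d as a 16.16 value, rounded up (mirrors div_fixed16()). */
static fixed_16_16_sketch_t div_fixed16_sketch(uint32_t val, uint32_t d)
{
	uint64_t interm = (uint64_t)val << 16;

	return (fixed_16_16_sketch_t){
		.val = (uint32_t)DIV_ROUND_UP_SKETCH(interm, (uint64_t)d),
	};
}

/* val * mul, rounded up to an integer (mirrors mul_round_up_u32_fixed16()). */
static uint32_t mul_round_up_u32_fixed16_sketch(uint32_t val,
						fixed_16_16_sketch_t mul)
{
	uint64_t interm = (uint64_t)val * mul.val;

	return (uint32_t)DIV_ROUND_UP_SKETCH(interm, (uint64_t)1 << 16);
}

int main(void)
{
	/* 3/2 == 1.5, stored as 0x00018000 in 16.16. */
	fixed_16_16_sketch_t ratio = div_fixed16_sketch(3, 2);

	/* 100 * 1.5, rounded up -> 150. */
	printf("ratio = 0x%08" PRIx32 ", scaled = %" PRIu32 "\n",
	       ratio.val, mul_round_up_u32_fixed16_sketch(100, ratio));
	return 0;
}

The same inputs run through the patched helpers give the same results: div_fixed16(3, 2) yields a .val of 0x18000, and mul_round_up_u32_fixed16(100, ...) on that ratio returns 150.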