include/linux/math64.h

#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

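/*
 * Illustrative note (not part of the original header): div64_long()
 * and div64_ul() divide a 64bit value by a long/unsigned long divisor
 * without the caller knowing how wide long is on the target, e.g.:
 *
 *	s64 share = div64_long(total, nitems);
 *
 * On 64bit builds they map to the full 64bit divides above; the 32bit
 * branch below maps them to the cheaper 32bit-divisor variants.
 */
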
/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

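/*
 * Illustrative example (not part of the original header): a typical
 * caller splits a nanosecond count into whole seconds plus leftover
 * nanoseconds with a single division (NSEC_PER_SEC is assumed from
 * <linux/time.h>):
 *
 *	u32 rem_ns;
 *	u64 secs = div_u64_rem(ts_ns, NSEC_PER_SEC, &rem_ns);
 */
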
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

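/*
 * Illustrative example (not part of the original header): as with C's
 * native operators, the quotient truncates toward zero and the
 * remainder takes the sign of the dividend:
 *
 *	s32 rem;
 *	s64 q = div_s64_rem(-7, 2, &rem);	(q == -3, rem == -1)
 */
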
/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

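/*
 * Illustrative example (not part of the original header): reserve
 * div64_u64() for divisors that genuinely need 64 bits, such as an
 * elapsed time in nanoseconds; a 32bit divisor should go through
 * div_u64() instead, which 32bit archs can implement far more cheaply:
 *
 *	u64 bytes_per_ns = div64_u64(total_bytes, elapsed_ns);
 */
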
/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

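/*
 * Illustrative example (not part of the original header): the signed
 * counterpart, for quantities that may be negative, e.g. dividing a
 * clock offset by a signed period:
 *
 *	s64 cycles = div64_s64(offset_ns, period_ns);
 */
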
#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

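/*
 * Illustrative note (not part of the original header): do_div() from
 * <asm/div64.h> is a macro with an awkward calling convention: it
 * replaces its first argument with the quotient in place and evaluates
 * to the remainder. The wrapper above hides that:
 *
 *	u64 n = some_value;
 *	u32 rem = do_div(n, 7);	(n now holds the quotient)
 */
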
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif

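/*
 * Illustrative example (not part of the original header): prefer
 * div_u64() over the plain / operator whenever a u64 is divided by a
 * 32bit value; on 32bit kernels a bare 64bit division would emit a
 * call to the compiler helper __udivdi3, which the kernel does not
 * provide:
 *
 *	u64 avg_ns = div_u64(total_ns, nr_samples);
 */
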
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

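/*
 * Illustrative example (not part of the original header): the signed
 * equivalent of div_u64(), again truncating toward zero:
 *
 *	s64 mean_err = div_s64(total_err, nr_samples);
 */
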
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

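/*
 * Illustrative example (not part of the original header): the
 * iterative variant pays one subtraction per unit of quotient, so it
 * is only a win when the quotient is known to be tiny, e.g. when
 * normalizing a timespec after adding a small nanosecond delta:
 *
 *	u64 ns = ts->tv_nsec + delta_ns;
 *	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
 *	ts->tv_nsec = ns;
 */
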
#endif /* _LINUX_MATH64_H */