/* $Id: mul.S,v 1.4 1996/09/30 02:22:32 davem Exp $
 * mul.S:	This routine was taken from glibc-1.09 and is covered
 *		by the GNU Library General Public License Version 2.
 */

/*
 * Signed multiply, from Appendix E of the Sparc Version 8
 * Architecture Manual.
 */

/*
 * Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the upper 32 bits of
 * the 64-bit product).
 *
 * This code optimizes short (less than 13-bit) multiplies.
 */
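
/*
 * Equivalently, in C (an illustrative sketch added for reference,
 * not from the original source):
 *
 *	long long mul(int o0, int o1)
 *	{
 *		return (long long) o0 * (long long) o1;
 *	}
 *
 * The low word of the product comes back in %o0, the high word in %o1.
 */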

	.globl	.mul
	.globl	_Mul
.mul:
_Mul:	/* needed for export */
	mov	%o0, %y		! multiplier -> Y
	andncc	%o0, 0xfff, %g0	! test bits 12..31
	be	Lmul_shortway	! if zero, can do it the short way
	 andcc	%g0, %g0, %o4	! zero the partial product and clear N and V

	/*
	 * Long multiply.  32 steps, followed by a final shift step.
	 */
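	/*
	 * Each mulscc step conditionally adds %o1 to the partial
	 * product in %o4 (depending on the low bit of %y, which holds
	 * the multiplier) and shifts the %o4/%y pair right one bit.
	 * Thirty-two steps consume the whole multiplier; the final
	 * mulscc against %g0 performs the last shift without adding.
	 */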
	mulscc	%o4, %o1, %o4	! 1
	mulscc	%o4, %o1, %o4	! 2
	mulscc	%o4, %o1, %o4	! 3
	mulscc	%o4, %o1, %o4	! 4
	mulscc	%o4, %o1, %o4	! 5
	mulscc	%o4, %o1, %o4	! 6
	mulscc	%o4, %o1, %o4	! 7
	mulscc	%o4, %o1, %o4	! 8
	mulscc	%o4, %o1, %o4	! 9
	mulscc	%o4, %o1, %o4	! 10
	mulscc	%o4, %o1, %o4	! 11
	mulscc	%o4, %o1, %o4	! 12
	mulscc	%o4, %o1, %o4	! 13
	mulscc	%o4, %o1, %o4	! 14
	mulscc	%o4, %o1, %o4	! 15
	mulscc	%o4, %o1, %o4	! 16
	mulscc	%o4, %o1, %o4	! 17
	mulscc	%o4, %o1, %o4	! 18
	mulscc	%o4, %o1, %o4	! 19
	mulscc	%o4, %o1, %o4	! 20
	mulscc	%o4, %o1, %o4	! 21
	mulscc	%o4, %o1, %o4	! 22
	mulscc	%o4, %o1, %o4	! 23
	mulscc	%o4, %o1, %o4	! 24
	mulscc	%o4, %o1, %o4	! 25
	mulscc	%o4, %o1, %o4	! 26
	mulscc	%o4, %o1, %o4	! 27
	mulscc	%o4, %o1, %o4	! 28
	mulscc	%o4, %o1, %o4	! 29
	mulscc	%o4, %o1, %o4	! 30
	mulscc	%o4, %o1, %o4	! 31
	mulscc	%o4, %o1, %o4	! 32
	mulscc	%o4, %g0, %o4	! final shift

	! If %o0 was negative, the result is
	! (%o0 * %o1) + (%o1 << 32)
	! We fix that here.
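	! (The stepped multiply consumes %o0 from %y as an unsigned
	! quantity, so a negative %o0 acts as %o0 + 2^32 and contributes
	! a spurious %o1 << 32 to the product.)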

#if 0
	tst	%o0
	bge	1f
	 rd	%y, %o0

	! %o0 was indeed negative; fix upper 32 bits of result by subtracting
	! %o1 (i.e., return %o4 - %o1 in %o1).
	retl
	 sub	%o4, %o1, %o1

1:
	retl
	 mov	%o4, %o1
#else
	/* Faster code adapted from tege@sics.se's code for umul.S. */
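	/*
	 * The trick, roughly, in C (sign bit smeared into a mask, so
	 * no branch is needed; assumes arithmetic right shift, which
	 * sra provides):
	 *
	 *	int o2 = (o0 >> 31) & o1;	// o1 if o0 < 0, else 0
	 *	hi = o4 - o2;			// corrected high word
	 */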
	sra	%o0, 31, %o2	! make mask from sign bit
	and	%o1, %o2, %o2	! %o2 = 0 or %o1, depending on sign of %o0
	rd	%y, %o0		! get lower half of product
	retl
	 sub	%o4, %o2, %o1	! subtract compensation
				! and put upper half in place
#endif

Lmul_shortway:
	/*
	 * Short multiply.  12 steps, followed by a final shift step.
	 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
	 * but there is no problem with %o0 being negative (unlike above).
	 */
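	/*
	 * (Only bits 0..11 of %o0 can be set on this path, per the
	 * andncc test at the top, so %o0 is non-negative and twelve
	 * steps consume the whole multiplier.)
	 */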
	mulscc	%o4, %o1, %o4	! 1
	mulscc	%o4, %o1, %o4	! 2
	mulscc	%o4, %o1, %o4	! 3
	mulscc	%o4, %o1, %o4	! 4
	mulscc	%o4, %o1, %o4	! 5
	mulscc	%o4, %o1, %o4	! 6
	mulscc	%o4, %o1, %o4	! 7
	mulscc	%o4, %o1, %o4	! 8
	mulscc	%o4, %o1, %o4	! 9
	mulscc	%o4, %o1, %o4	! 10
	mulscc	%o4, %o1, %o4	! 11
	mulscc	%o4, %o1, %o4	! 12
	mulscc	%o4, %g0, %o4	! final shift

	/*
	 *  %o4 has 20 of the bits that should be in the low part of the
	 * result; %y has the bottom 12 (as %y's top 12).  That is:
	 *
	 *	  %o4		    %y
	 *	+----------------+----------------+
	 *	| -12- |   -20-  | -12- |  -20-   |
	 *	+------(---------+------)---------+
	 *	  --hi-- ----low-part----
	 *
	 *  The upper 12 bits of %o4 should be sign-extended to form the
	 * high part of the product (i.e., highpart = %o4 >> 20).
	 */
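	/*
	 * In C, the reassembly below is roughly:
	 *
	 *	lo = (o4 << 12) | ((unsigned) y >> 20);	// zero fill
	 *	hi = (int) o4 >> 20;			// sign extend
	 */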

	rd	%y, %o5
	sll	%o4, 12, %o0	! shift middle bits left 12
	srl	%o5, 20, %o5	! shift low bits right 20, zero fill at left
	or	%o5, %o0, %o0	! construct low part of result
	retl
	 sra	%o4, 20, %o1	! ... and extract high part of result

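/*
 * Drop-in replacement using the V8 hardware multiply; presumably
 * patched over the stepped code above at boot on CPUs that
 * implement smul.
 */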
	.globl	.mul_patch
.mul_patch:
	smul	%o0, %o1, %o0
	retl
	 rd	%y, %o1
	nop