/*
 *  linux/arch/arm/lib/csumpartial.S
 *
 *  Copyright (C) 1995-1998 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.text

/*
 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
 * Params  : r0 = buffer, r1 = len, r2 = checksum
 * Returns : r0 = new checksum
 */
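
/*
 * For reference, a minimal C model of what this routine accumulates,
 * assuming a little-endian host and a buffer starting on an even
 * address.  The function name and the kernel-style u32/u64 temporaries
 * below are illustrative, not part of the kernel API:
 *
 *	static u32 csum_partial_model(const unsigned char *p, int len,
 *				      u32 sum)
 *	{
 *		u64 acc = sum;
 *		int i;
 *
 *		for (i = 0; i + 1 < len; i += 2)
 *			acc += p[i] | (p[i + 1] << 8);	// 16-bit words
 *		if (len & 1)
 *			acc += p[len - 1];		// trailing byte, low lane
 *		while (acc >> 32)			// end-around carry
 *			acc = (u32)acc + (acc >> 32);
 *		return (u32)acc;
 *	}
 */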

buf	.req	r0
len	.req	r1
sum	.req	r2
td0	.req	r3
td1	.req	r4		@ save before use
td2	.req	r5		@ save before use
td3	.req	lr

.Lzero:		mov	r0, sum
		add	sp, sp, #4		@ discard the saved buf
		ldr	pc, [sp], #4		@ pop lr into pc and return

		/*
		 * Handle 0 to 7 bytes, with any alignment of source and
		 * destination pointers.  Note that when we get here, C = 0
		 */
.Lless8:	teq	len, #0			@ check for zero count
		beq	.Lzero

		/* we must have at least one byte. */
		tst	buf, #1			@ odd address?
		movne	sum, sum, ror #8	@ pre-rotate sum for odd start
		ldrneb	td0, [buf], #1
		subne	len, len, #1
		adcnes	sum, sum, td0, put_byte_1
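
		/*
		 * put_byte_1 (from <asm/assembler.h>) places the loaded byte
		 * in bits 8-15.  When buf starts odd, sum is first rotated
		 * by 8 (movne above) so that the subsequent aligned loads
		 * accumulate into byte-rotated lanes; .Ldone applies the
		 * matching ror #8 to put the lanes back.
		 */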

.Lless4:	tst	len, #6
		beq	.Lless8_byte

		/* we are now half-word aligned */

.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
		ldrh	td0, [buf], #2
		sub	len, len, #2
#else
		ldrb	td0, [buf], #1
		ldrb	td3, [buf], #1
		sub	len, len, #2
#ifndef __ARMEB__
		orr	td0, td0, td3, lsl #8
#else
		orr	td0, td3, td0, lsl #8
#endif
#endif
		adcs	sum, sum, td0
		tst	len, #6
		bne	.Lless8_wordlp
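
		/*
		 * Pre-ARMv4 cores lack ldrh, so the halfword above is
		 * assembled from two byte loads: for bytes b0, b1 at
		 * increasing addresses the value is b0 | (b1 << 8) on
		 * little-endian and b1 | (b0 << 8) on big-endian, matching
		 * the two orr variants.
		 */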

.Lless8_byte:	tst	len, #1			@ odd number of bytes
		ldrneb	td0, [buf], #1		@ include last byte
		adcnes	sum, sum, td0, put_byte_0	@ update checksum

.Ldone:		adc	r0, sum, #0		@ collect up the last carry
		ldr	td0, [sp], #4		@ pop the original buf
		tst	td0, #1			@ check buffer alignment
		movne	r0, r0, ror #8		@ rotate checksum by 8 bits
		ldr	pc, [sp], #4		@ return
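
		/*
		 * The adc #0 above folds the final carry back into the sum
		 * (end-around carry).  The conditional ror #8 undoes the
		 * rotation applied at entry for buffers starting on an odd
		 * address, restoring the byte lanes of the returned partial
		 * sum.
		 */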

.Lnot_aligned:	tst	buf, #1			@ odd address
		ldrneb	td0, [buf], #1		@ make even
		subne	len, len, #1
		adcnes	sum, sum, td0, put_byte_1	@ update checksum

		tst	buf, #2			@ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
		ldrneh	td0, [buf], #2		@ make 32-bit aligned
		subne	len, len, #2
#else
		ldrneb	td0, [buf], #1
		ldrneb	ip, [buf], #1
		subne	len, len, #2
#ifndef __ARMEB__
		orrne	td0, td0, ip, lsl #8
#else
		orrne	td0, ip, td0, lsl #8
#endif
#endif
		adcnes	sum, sum, td0		@ update checksum
		mov	pc, lr
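
		/*
		 * This helper is reached via blne from the entry path with
		 * C = 0 and len >= 8, so it can safely consume up to three
		 * bytes to reach 32-bit alignment before returning through
		 * mov pc, lr.
		 */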

ENTRY(csum_partial)
		stmfd	sp!, {buf, lr}
		cmp	len, #8			@ Ensure that we have at least
		blo	.Lless8			@ 8 bytes to copy.

		tst	buf, #1			@ odd start address?
		movne	sum, sum, ror #8	@ .Ldone rotates it back

		adds	sum, sum, #0		@ C = 0
		tst	buf, #3			@ Test destination alignment
		blne	.Lnot_aligned		@ align destination, return here

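		/*
		 * Main loop: each pass of 2f consumes 32 bytes via two
		 * 4-register ldmia blocks, chaining carries through adcs;
		 * ip holds len rounded down to a multiple of 32.
		 */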
1:		bics	ip, len, #31		@ ip = len & ~31
		beq	3f

		stmfd	sp!, {r4 - r5}		@ save r4-r5 so td1/td2 are usable
2:		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		sub	ip, ip, #32
		teq	ip, #0			@ set Z without touching C
		bne	2b
		ldmfd	sp!, {r4 - r5}

3:		tst	len, #0x1c		@ should not change C
		beq	.Lless4

4:		ldr	td0, [buf], #4
		sub	len, len, #4
		adcs	sum, sum, td0
		tst	len, #0x1c
		bne	4b
		b	.Lless4
ENDPROC(csum_partial)
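
/*
 * Hypothetical usage sketch (csum_fold is the kernel's 32-to-16-bit fold
 * from <asm/checksum.h>; buf and len here are illustrative):
 *
 *	__u32 partial = csum_partial(buf, len, 0);
 *	__sum16 check = csum_fold(partial);
 */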