};
struct rctx {
- be128 buf[XTS_BUFFER_SIZE / sizeof(be128)];
+ le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
- be128 t;
+ le128 t;
- be128 *ext;
+ le128 *ext;
struct scatterlist srcbuf[2];
struct scatterlist dstbuf[2];
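For reference (not part of the patch): the XTS tweak is a 128-bit little-endian integer, which is why be128 was never the right type here. In the kernel's le128 (include/crypto/b128ops.h) the low 64 bits sit in the b member and the high 64 bits in a. A minimal userspace sketch of that layout and of the XOR helper, with plain uint64_t standing in for __le64 and illustrative _sk names:

#include <stdint.h>

/*
 * Userspace stand-in for le128 (include/crypto/b128ops.h): the XTS tweak
 * is a little-endian 128-bit value, so bytes 0-7 are the low half and
 * bytes 8-15 the high half.
 */
typedef struct {
	uint64_t b;	/* low  64 bits, bytes 0-7 of the tweak  */
	uint64_t a;	/* high 64 bits, bytes 8-15 of the tweak */
} le128_sk;

/* r = p ^ q, mirroring le128_xor() as used throughout the patch */
static inline void le128_xor_sk(le128_sk *r, const le128_sk *p,
				const le128_sk *q)
{
	r->b = p->b ^ q->b;
	r->a = p->a ^ q->a;
}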
static int post_crypt(struct skcipher_request *req)
{
struct rctx *rctx = skcipher_request_ctx(req);
- be128 *buf = rctx->ext ?: rctx->buf;
+ le128 *buf = rctx->ext ?: rctx->buf;
struct skcipher_request *subreq;
const int bs = XTS_BLOCK_SIZE;
struct skcipher_walk w;
while (w.nbytes) {
unsigned int avail = w.nbytes;
- be128 *wdst;
+ le128 *wdst;
wdst = w.dst.virt.addr;
do {
- be128_xor(wdst, buf++, wdst);
+ le128_xor(wdst, buf++, wdst);
wdst++;
} while ((avail -= bs) >= bs);
static int pre_crypt(struct skcipher_request *req)
{
struct rctx *rctx = skcipher_request_ctx(req);
- be128 *buf = rctx->ext ?: rctx->buf;
+ le128 *buf = rctx->ext ?: rctx->buf;
struct skcipher_request *subreq;
const int bs = XTS_BLOCK_SIZE;
struct skcipher_walk w;
while (w.nbytes) {
unsigned int avail = w.nbytes;
- be128 *wsrc;
- be128 *wdst;
+ le128 *wsrc;
+ le128 *wdst;
wsrc = w.src.virt.addr;
wdst = w.dst.virt.addr;
do {
*buf++ = rctx->t;
- be128_xor(wdst++, &rctx->t, wsrc++);
+ le128_xor(wdst++, &rctx->t, wsrc++);
gf128mul_x_ble(&rctx->t, &rctx->t);
} while ((avail -= bs) >= bs);
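The two hunks above are a pair: pre_crypt stashes each per-block tweak T_i in buf and writes PP_i = T_i ^ P_i, the skcipher sub-request then encrypts the PP blocks in place, and post_crypt XORs the saved tweaks back in to give C_i = T_i ^ E(PP_i). A compact single-threaded model of that split, reusing the le128_sk helpers from the sketch above; the names and the encrypt callback are hypothetical, and the tweak sequence is taken as precomputed here, whereas the real pre_crypt advances it in place with gf128mul_x_ble:

#include <stddef.h>

/* Hypothetical model of the pre_crypt / post_crypt pair.  'tweaks' holds
 * T_0..T_{n-1}, which the real pre_crypt generates on the fly and saves in
 * rctx->buf; 'encrypt' stands in for the skcipher sub-request. */
static void xts_two_pass_sk(le128_sk *blocks, size_t n,
			    const le128_sk *tweaks,
			    void (*encrypt)(le128_sk *blocks, size_t n))
{
	size_t i;

	/* pre_crypt: PP_i = T_i ^ P_i, in place */
	for (i = 0; i < n; i++)
		le128_xor_sk(&blocks[i], &tweaks[i], &blocks[i]);

	/* sub-request: CC_i = E(PP_i), in place */
	encrypt(blocks, n);

	/* post_crypt: C_i = T_i ^ CC_i */
	for (i = 0; i < n; i++)
		le128_xor_sk(&blocks[i], &tweaks[i], &blocks[i]);
}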
const unsigned int max_blks = req->tbuflen / bsize;
struct blkcipher_walk walk;
unsigned int nblocks;
- be128 *src, *dst, *t;
- be128 *t_buf = req->tbuf;
+ le128 *src, *dst, *t;
+ le128 *t_buf = req->tbuf;
int err, i;
BUG_ON(max_blks < 1);
return err;
nblocks = min(nbytes / bsize, max_blks);
- src = (be128 *)walk.src.virt.addr;
- dst = (be128 *)walk.dst.virt.addr;
+ src = (le128 *)walk.src.virt.addr;
+ dst = (le128 *)walk.dst.virt.addr;
/* calculate first value of T */
req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
t = &t_buf[i];
/* PP <- T xor P */
- be128_xor(dst + i, t, src + i);
+ le128_xor(dst + i, t, src + i);
}
/* CC <- E(Key2,PP) */
/* C <- T xor CC */
for (i = 0; i < nblocks; i++)
- be128_xor(dst + i, dst + i, &t_buf[i]);
+ le128_xor(dst + i, dst + i, &t_buf[i]);
src += nblocks;
dst += nblocks;
nblocks = min(nbytes / bsize, max_blks);
} while (nblocks > 0);
- *(be128 *)walk.iv = *t;
+ *(le128 *)walk.iv = *t;
err = blkcipher_walk_done(desc, &walk, nbytes);
nbytes = walk.nbytes;
break;
nblocks = min(nbytes / bsize, max_blks);
- src = (be128 *)walk.src.virt.addr;
- dst = (be128 *)walk.dst.virt.addr;
+ src = (le128 *)walk.src.virt.addr;
+ dst = (le128 *)walk.dst.virt.addr;
}
return err;
}
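A side note on *(le128 *)walk.iv = *t; above: storing the running tweak straight back into the 16-byte IV round-trips correctly only because the tweak really is little-endian, i.e. the IV holds the 128-bit value least-significant byte first, which is exactly what the __le64 members of le128 encode on any host. An illustrative serialization, written with explicit byte stores because the uint64_t stand-ins used in these sketches carry no endianness tag:

#include <stdint.h>

/* Illustrative only: serialize a 128-bit tweak into a 16-byte XTS IV,
 * least-significant byte first, the byte order the kernel gets for free
 * from le128's __le64 members.  'lo' and 'hi' play le128.b and le128.a. */
static void store_tweak_le(uint8_t iv[16], uint64_t lo, uint64_t hi)
{
	int i;

	for (i = 0; i < 8; i++) {
		iv[i]     = (uint8_t)(lo >> (8 * i));	/* bytes 0-7  */
		iv[8 + i] = (uint8_t)(hi >> (8 * i));	/* bytes 8-15 */
	}
}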
/* needed by XTS */
-static inline void gf128mul_x_ble(be128 *r, const be128 *x)
+static inline void gf128mul_x_ble(le128 *r, const le128 *x)
{
u64 a = le64_to_cpu(x->a);
u64 b = le64_to_cpu(x->b);
/* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
- u64 _tt = gf128mul_mask_from_bit(b, 63) & 0x87;
+ u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87;
- r->a = cpu_to_le64((a << 1) ^ _tt);
- r->b = cpu_to_le64((b << 1) | (a >> 63));
+ r->a = cpu_to_le64((a << 1) | (b >> 63));
+ r->b = cpu_to_le64((b << 1) ^ _tt);
}
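The swapped result lines are the substance of the fix: with le128 naming, a is the high half of the tweak, so the bit shifted out during the doubling is bit 63 of a and the 0x87 reduction term folds into the low half b (under the old be128 member names the roles of a and b were reversed, which is what the removed lines reflect). A standalone userspace sketch of the fixed behaviour, with plain uint64_t halves and a value chosen to exercise the reduction path:

#include <assert.h>
#include <stdint.h>

/* Double a 128-bit XTS tweak in GF(2^128), mirroring the fixed
 * gf128mul_x_ble(): 'hi' and 'lo' play le128.a and le128.b. */
static void xts_mul_x_sk(uint64_t *hi, uint64_t *lo)
{
	/* 0x87 iff the bit shifted out of the high half is set,
	 * i.e. x^128 reduced by x^128 + x^7 + x^2 + x + 1 */
	uint64_t tt = (*hi >> 63) ? 0x87 : 0;

	*hi = (*hi << 1) | (*lo >> 63);	/* carry out of the low half */
	*lo = (*lo << 1) ^ tt;
}

int main(void)
{
	/* tweak = x^127: one doubling wraps around to x^128 = 0x87 */
	uint64_t hi = 1ULL << 63, lo = 0;

	xts_mul_x_sk(&hi, &lo);
	assert(hi == 0 && lo == 0x87);
	return 0;
}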
/* 4k table optimization */