#endif
-
/* add a lustre compatible layer for crypto API */
#include <linux/crypto.h>
-#define ll_crypto_hash crypto_hash
-#define ll_crypto_cipher crypto_blkcipher
-#define ll_crypto_alloc_hash(name, type, mask) crypto_alloc_hash(name, type, mask)
-#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_hash_setkey(tfm, key, keylen)
-#define ll_crypto_hash_init(desc) crypto_hash_init(desc)
-#define ll_crypto_hash_update(desc, sl, bytes) crypto_hash_update(desc, sl, bytes)
-#define ll_crypto_hash_final(desc, out) crypto_hash_final(desc, out)
-#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
- crypto_blkcipher_setkey(tfm, key, keylen)
-#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
- crypto_blkcipher_set_iv(tfm, src, len)
-#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
- crypto_blkcipher_get_iv(tfm, dst, len)
-#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
- crypto_blkcipher_encrypt(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
- crypto_blkcipher_decrypt(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
- crypto_blkcipher_encrypt_iv(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
- crypto_blkcipher_decrypt_iv(desc, dst, src, bytes)
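+/* Return ERR_PTR(-ENOMEM) rather than NULL when blkcipher allocation fails. */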
static inline
-struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(const char *name,
+struct crypto_blkcipher *ll_crypto_alloc_blkcipher(const char *name,
u32 type, u32 mask)
{
- struct ll_crypto_cipher *rtn = crypto_alloc_blkcipher(name, type, mask);
+ struct crypto_blkcipher *rtn = crypto_alloc_blkcipher(name, type, mask);
return (rtn == NULL ? ERR_PTR(-ENOMEM) : rtn);
}
-static inline int ll_crypto_hmac(struct ll_crypto_hash *tfm,
- u8 *key, unsigned int *keylen,
- struct scatterlist *sg,
- unsigned int size, u8 *result)
-{
- struct hash_desc desc;
- int rv;
- desc.tfm = tfm;
- desc.flags = 0;
- rv = crypto_hash_setkey(desc.tfm, key, *keylen);
- if (rv) {
- CERROR("failed to hash setkey: %d\n", rv);
- return rv;
- }
- return crypto_hash_digest(&desc, sg, size, result);
-}
-static inline
-unsigned int ll_crypto_tfm_alg_max_keysize(struct crypto_blkcipher *tfm)
-{
- return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.max_keysize;
-}
-static inline
-unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
-{
- return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
-}
-
-#define ll_crypto_hash_blocksize(tfm) crypto_hash_blocksize(tfm)
-#define ll_crypto_hash_digestsize(tfm) crypto_hash_digestsize(tfm)
-#define ll_crypto_blkcipher_ivsize(tfm) crypto_blkcipher_ivsize(tfm)
-#define ll_crypto_blkcipher_blocksize(tfm) crypto_blkcipher_blocksize(tfm)
-#define ll_crypto_free_hash(tfm) crypto_free_hash(tfm)
-#define ll_crypto_free_blkcipher(tfm) crypto_free_blkcipher(tfm)
-
#define ll_vfs_rmdir(dir,entry,mnt) vfs_rmdir(dir,entry)
#define ll_vfs_mkdir(inode,dir,mnt,mode) vfs_mkdir(inode,dir,mode)
#define ll_vfs_link(old,mnt,dir,new,mnt1) vfs_link(old,dir,new)
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/crypto.h>
#include <obd_class.h>
#include <lustre_debug.h>
EXPORT_SYMBOL(capa_lock);
EXPORT_SYMBOL(capa_count);
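+/* Minimum key length advertised by the blkcipher algorithm descriptor. */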
+static inline
+unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
+{
+ return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
+}
+
struct hlist_head *init_capa_hash(void)
{
struct hlist_head *hash;
}
EXPORT_SYMBOL(capa_lookup);
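+/* Set the HMAC key on @tfm, then digest the scatterlist @sg into @result. */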
+static inline int ll_crypto_hmac(struct crypto_hash *tfm,
+ u8 *key, unsigned int *keylen,
+ struct scatterlist *sg,
+ unsigned int size, u8 *result)
+{
+ struct hash_desc desc;
+ int rv;
+ desc.tfm = tfm;
+ desc.flags = 0;
+ rv = crypto_hash_setkey(desc.tfm, key, *keylen);
+ if (rv) {
+ CERROR("failed to hash setkey: %d\n", rv);
+ return rv;
+ }
+ return crypto_hash_digest(&desc, sg, size, result);
+}
+
int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
{
- struct ll_crypto_hash *tfm;
+ struct crypto_hash *tfm;
struct capa_hmac_alg *alg;
int keylen;
struct scatterlist sl;
alg = &capa_hmac_algs[capa_alg(capa)];
- tfm = ll_crypto_alloc_hash(alg->ha_name, 0, 0);
+ tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
if (IS_ERR(tfm)) {
CERROR("crypto_alloc_tfm failed, check whether your kernel "
"has crypto support!\n");
(unsigned long)(capa) % PAGE_CACHE_SIZE);
ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac);
- ll_crypto_free_hash(tfm);
+ crypto_free_hash(tfm);
return 0;
}
int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
{
- struct ll_crypto_cipher *tfm;
+ struct crypto_blkcipher *tfm;
struct scatterlist sd;
struct scatterlist ss;
struct blkcipher_desc desc;
GOTO(out, rc = -EINVAL);
}
- rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+ rc = crypto_blkcipher_setkey(tfm, key, min);
if (rc) {
CERROR("failed to set key for aes\n");
GOTO(out, rc);
desc.tfm = tfm;
desc.info = NULL;
desc.flags = 0;
- rc = ll_crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
+ rc = crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
if (rc) {
CERROR("failed to encrypt for aes\n");
GOTO(out, rc);
}
out:
- ll_crypto_free_blkcipher(tfm);
+ crypto_free_blkcipher(tfm);
return rc;
}
EXPORT_SYMBOL(capa_encrypt_id);
int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
{
- struct ll_crypto_cipher *tfm;
+ struct crypto_blkcipher *tfm;
struct scatterlist sd;
struct scatterlist ss;
struct blkcipher_desc desc;
GOTO(out, rc = -EINVAL);
}
- rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+ rc = crypto_blkcipher_setkey(tfm, key, min);
if (rc) {
CERROR("failed to set key for aes\n");
GOTO(out, rc);
desc.tfm = tfm;
desc.info = NULL;
desc.flags = 0;
- rc = ll_crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
+ rc = crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
if (rc) {
CERROR("failed to decrypt for aes\n");
GOTO(out, rc);
}
out:
- ll_crypto_free_blkcipher(tfm);
+ crypto_free_blkcipher(tfm);
return rc;
}
EXPORT_SYMBOL(capa_decrypt_id);
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
#include <obd.h>
#include <obd_class.h>
return -1;
}
- if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+ if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
CERROR("failed to set %s key, len %d\n",
alg_name, kb->kb_key.len);
return -1;
{
rawobj_free(&kb->kb_key);
if (kb->kb_tfm)
- ll_crypto_free_blkcipher(kb->kb_tfm);
+ crypto_free_blkcipher(kb->kb_tfm);
}
static
}
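+/*
+ * Copy the input into the output buffer and encrypt or decrypt it there,
+ * using a local copy of the caller-supplied IV.
+ */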
static
-__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
+__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
int decrypt,
void * iv,
void * in,
desc.info = local_iv;
desc.flags= 0;
- if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
+ if (length % crypto_blkcipher_blocksize(tfm) != 0) {
CERROR("output length %d mismatch blocksize %d\n",
- length, ll_crypto_blkcipher_blocksize(tfm));
+ length, crypto_blkcipher_blocksize(tfm));
goto out;
}
- if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
- CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
+ if (crypto_blkcipher_ivsize(tfm) > 16) {
+ CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
- memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
+ memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
memcpy(out, in, length);
buf_to_sg(&sg, out, length);
if (decrypt)
- ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+ ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
else
- ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+ ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
out:
return(ret);
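+/*
+ * Keyed checksum: set the key on @tfm and hash the message buffers,
+ * bulk pages and krb5 header.
+ */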
static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
+int krb5_digest_hmac(struct crypto_hash *tfm,
rawobj_t *key,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
struct scatterlist sg[1];
int i;
- ll_crypto_hash_setkey(tfm, key->data, key->len);
+ crypto_hash_setkey(tfm, key->data, key->len);
desc.tfm = tfm;
desc.flags= 0;
- ll_crypto_hash_init(&desc);
+ crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- ll_crypto_hash_update(&desc, sg, msgs[i].len);
+ crypto_hash_update(&desc, sg, msgs[i].len);
}
for (i = 0; i < iovcnt; i++) {
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
- ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- return ll_crypto_hash_final(&desc, cksum->data);
+ return crypto_hash_final(&desc, cksum->data);
}
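+/*
+ * Unkeyed digest over the same inputs; the resulting checksum is then
+ * encrypted with kb->kb_tfm.
+ */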
static inline
-int krb5_digest_norm(struct ll_crypto_hash *tfm,
+int krb5_digest_norm(struct crypto_hash *tfm,
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
desc.tfm = tfm;
desc.flags= 0;
- ll_crypto_hash_init(&desc);
+ crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- ll_crypto_hash_update(&desc, sg, msgs[i].len);
+ crypto_hash_update(&desc, sg, msgs[i].len);
}
for (i = 0; i < iovcnt; i++) {
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
- ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- ll_crypto_hash_final(&desc, cksum->data);
+ crypto_hash_final(&desc, cksum->data);
return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
cksum->data, cksum->len);
rawobj_t *cksum)
{
struct krb5_enctype *ke = &enctypes[enctype];
- struct ll_crypto_hash *tfm;
+ struct crypto_hash *tfm;
__u32 code = GSS_S_FAILURE;
int rc;
return GSS_S_FAILURE;
}
- cksum->len = ll_crypto_hash_digestsize(tfm);
+ cksum->len = crypto_hash_digestsize(tfm);
OBD_ALLOC_LARGE(cksum->data, cksum->len);
if (!cksum->data) {
cksum->len = 0;
if (rc == 0)
code = GSS_S_COMPLETE;
out_tfm:
- ll_crypto_free_hash(tfm);
+ crypto_free_hash(tfm);
return code;
}
}
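+/*
+ * Encrypt or decrypt a vector of rawobjs; mode_ecb selects the cipher
+ * calls without IV chaining.
+ */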
static
-int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
int mode_ecb,
int inobj_cnt,
rawobj_t *inobjs,
if (mode_ecb) {
if (enc)
- rc = ll_crypto_blkcipher_encrypt(
+ rc = crypto_blkcipher_encrypt(
&desc, &dst, &src, src.length);
else
- rc = ll_crypto_blkcipher_decrypt(
+ rc = crypto_blkcipher_decrypt(
&desc, &dst, &src, src.length);
} else {
if (enc)
- rc = ll_crypto_blkcipher_encrypt_iv(
+ rc = crypto_blkcipher_encrypt_iv(
&desc, &dst, &src, src.length);
else
- rc = ll_crypto_blkcipher_decrypt_iv(
+ rc = crypto_blkcipher_decrypt_iv(
&desc, &dst, &src, src.length);
}
* if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
*/
static
-int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
struct krb5_header *khdr,
char *confounder,
struct ptlrpc_bulk_desc *desc,
LASSERT(desc->bd_iov_count);
LASSERT(desc->bd_enc_iov);
- blocksize = ll_crypto_blkcipher_blocksize(tfm);
+ blocksize = crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
LASSERT(cipher->len == blocksize + sizeof(*khdr));
buf_to_sg(&src, confounder, blocksize);
buf_to_sg(&dst, cipher->data, blocksize);
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
if (rc) {
CERROR("error to encrypt confounder: %d\n", rc);
return rc;
desc->bd_enc_iov[i].kiov_offset = dst.offset;
desc->bd_enc_iov[i].kiov_len = dst.length;
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
src.length);
if (rc) {
CERROR("error to encrypt page: %d\n", rc);
buf_to_sg(&src, khdr, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc,
&dst, &src, sizeof(*khdr));
if (rc) {
CERROR("error to encrypt krb5 header: %d\n", rc);
* should have been done by prep_bulk().
*/
static
-int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
struct krb5_header *khdr,
struct ptlrpc_bulk_desc *desc,
rawobj_t *cipher,
LASSERT(desc->bd_enc_iov);
LASSERT(desc->bd_nob_transferred);
- blocksize = ll_crypto_blkcipher_blocksize(tfm);
+ blocksize = crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
LASSERT(cipher->len == blocksize + sizeof(*khdr));
buf_to_sg(&src, cipher->data, blocksize);
buf_to_sg(&dst, plain->data, blocksize);
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
if (rc) {
CERROR("error to decrypt confounder: %d\n", rc);
return rc;
if (desc->bd_iov[i].kiov_len % blocksize == 0)
sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
src.length);
if (rc) {
CERROR("error to decrypt page: %d\n", rc);
buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc,
&dst, &src, sizeof(*khdr));
if (rc) {
CERROR("error to decrypt tail: %d\n", rc);
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
LASSERT(kctx->kc_keye.kb_tfm == NULL ||
ke->ke_conf_size >=
- ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+ crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
/*
* final token format:
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
LASSERT(blocksize <= ke->ke_conf_size);
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rawobj_t arc4_keye;
- struct ll_crypto_cipher *arc4_tfm;
+ struct crypto_blkcipher *arc4_tfm;
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
GOTO(arc4_out_key, rc = -EACCES);
}
- if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+ if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
3, data_desc, &cipher, 1);
arc4_out_tfm:
- ll_crypto_free_blkcipher(arc4_tfm);
+ crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
LASSERT(desc->bd_enc_iov);
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
LASSERT(desc->bd_enc_iov[i].kiov_page);
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/*
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/* expected token layout:
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rawobj_t arc4_keye;
- struct ll_crypto_cipher *arc4_tfm;
+ struct crypto_blkcipher *arc4_tfm;
cksum.data = token->data + token->len - ke->ke_hash_size;
cksum.len = ke->ke_hash_size;
GOTO(arc4_out_key, rc = -EACCES);
}
- if (ll_crypto_blkcipher_setkey(arc4_tfm,
+ if (crypto_blkcipher_setkey(arc4_tfm,
arc4_keye.data, arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
- ll_crypto_free_blkcipher(arc4_tfm);
+ crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
LBUG();
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);