#define FS_HAS_FIEMAP (0)
#endif
-
-/* add a lustre compatible layer for crypto API */
-#include <linux/crypto.h>
-
-static inline
-struct crypto_blkcipher *ll_crypto_alloc_blkcipher(const char *name,
- u32 type, u32 mask)
-{
- struct crypto_blkcipher *rtn = crypto_alloc_blkcipher(name, type, mask);
-
- return (rtn == NULL ? ERR_PTR(-ENOMEM) : rtn);
-}
-
#define ll_vfs_rmdir(dir,entry,mnt) vfs_rmdir(dir,entry)
#define ll_vfs_mkdir(inode,dir,mnt,mode) vfs_mkdir(inode,dir,mode)
#define ll_vfs_link(old,mnt,dir,new,mnt1) vfs_link(old,dir,new)
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
- tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+ tfm = crypto_alloc_blkcipher(alg, 0, 0);
if (IS_ERR(tfm)) {
CERROR("failed to load transform for aes\n");
return PTR_ERR(tfm);
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
- tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+ tfm = crypto_alloc_blkcipher(alg, 0, 0);
if (IS_ERR(tfm)) {
CERROR("failed to load transform for aes\n");
return PTR_ERR(tfm);
static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
- kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+ kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
if (IS_ERR(kb->kb_tfm)) {
CERROR("failed to alloc tfm: %s, mode %d\n",
alg_name, alg_mode);
GOTO(arc4_out, rc = -EACCES);
}
- arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
GOTO(arc4_out, rc = -EACCES);
}
- arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
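
Note on the pattern the patch adopts: crypto_alloc_blkcipher() follows the
standard kernel convention of returning an ERR_PTR()-encoded error (never
NULL) on failure, so the IS_ERR()/PTR_ERR() checks above keep working once
the ll_crypto_alloc_blkcipher() wrapper is gone. A minimal sketch of that
calling pattern, assuming a kernel that still provides the legacy blkcipher
API (the "cbc(aes)" algorithm name is only an example):

#include <linux/crypto.h>
#include <linux/err.h>

static int example_alloc_cipher(void)
{
	struct crypto_blkcipher *tfm;

	/* failure is reported via ERR_PTR(), never NULL */
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_free_blkcipher(tfm);
	return 0;
}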