asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
+ sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)
+ sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1)
- KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
- KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+ KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
+ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
LDFLAGS := -m elf_$(UTS_MACHINE)
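The as-instr probe assembles a single instruction and emits the -D define only when the assembler accepts it, so C and asm sources can then gate the SHA-NI paths at preprocessing time. A minimal sketch of the consuming side, assuming a sha1_ni_transform routine named after the sha1_transform_avx pattern used later in this series:

/*
 * Sketch: compiled only when the assembler understands SHA-NI.
 * sha1_ni_transform is an assumed name, following the
 * sha1_transform_avx naming convention seen below.
 */
#ifdef CONFIG_AS_SHA1_NI
asmlinkage void sha1_ni_transform(u32 *digest, const char *data,
				  unsigned int rounds);
#endif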
}
};
+ static int register_sha1_ssse3(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_SSSE3))
+ return crypto_register_shash(&sha1_ssse3_alg);
+ return 0;
+ }
+
+ static void unregister_sha1_ssse3(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_SSSE3))
+ crypto_unregister_shash(&sha1_ssse3_alg);
+ }
+
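These paired register/unregister helpers are meant to compose in the module init and exit paths; a hedged sketch of how init might chain them (only register_sha1_ssse3() above comes from the patch, the surrounding structure is an assumption):

static int __init sha1_ssse3_mod_init(void)
{
	int ret;

	/* Register the baseline SSSE3 variant first. */
	ret = register_sha1_ssse3();
	if (ret)
		return ret;

	/*
	 * The AVX/AVX2/SHA-NI variants would be registered here in the
	 * same style, unwinding earlier registrations on failure.
	 */
	return 0;
}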
#ifdef CONFIG_AS_AVX
- static bool __init avx_usable(void)
+ asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
+ unsigned int rounds);
+
+ static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+ {
+ return sha1_update(desc, data, len,
+ (sha1_transform_fn *) sha1_transform_avx);
+ }
+
+ static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+ return sha1_finup(desc, data, len, out,
+ (sha1_transform_fn *) sha1_transform_avx);
+ }
+
+ static int sha1_avx_final(struct shash_desc *desc, u8 *out)
+ {
+ return sha1_avx_finup(desc, NULL, 0, out);
+ }
+
+ static struct shash_alg sha1_avx_alg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = sha1_base_init,
+ .update = sha1_avx_update,
+ .final = sha1_avx_final,
+ .finup = sha1_avx_finup,
+ .descsize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-avx",
+ .cra_priority = 160,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+ };
+
+ static bool avx_usable(void)
{
- if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+ if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
if (cpu_has_avx)
pr_info("AVX detected but unusable.\n");
return false;
}
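With avx_usable() deciding at runtime whether the AVX variant is safe to expose, its registration helpers presumably mirror the SSSE3 pair above; a sketch under that assumption:

static int register_sha1_avx(void)
{
	if (avx_usable())
		return crypto_register_shash(&sha1_avx_alg);
	return 0;
}

static void unregister_sha1_avx(void)
{
	if (avx_usable())
		crypto_unregister_shash(&sha1_avx_alg);
}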
} };
+ static int register_sha256_ssse3(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_SSSE3))
+ return crypto_register_shashes(sha256_ssse3_algs,
+ ARRAY_SIZE(sha256_ssse3_algs));
+ return 0;
+ }
+
+ static void unregister_sha256_ssse3(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_SSSE3))
+ crypto_unregister_shashes(sha256_ssse3_algs,
+ ARRAY_SIZE(sha256_ssse3_algs));
+ }
+
#ifdef CONFIG_AS_AVX
- static bool __init avx_usable(void)
+ asmlinkage void sha256_transform_avx(u32 *digest, const char *data,
+ u64 rounds);
+
+ static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+ {
+ return sha256_update(desc, data, len, sha256_transform_avx);
+ }
+
+ static int sha256_avx_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+ return sha256_finup(desc, data, len, out, sha256_transform_avx);
+ }
+
+ static int sha256_avx_final(struct shash_desc *desc, u8 *out)
+ {
+ return sha256_avx_finup(desc, NULL, 0, out);
+ }
+
+ static struct shash_alg sha256_avx_algs[] = { {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = sha256_base_init,
+ .update = sha256_avx_update,
+ .final = sha256_avx_final,
+ .finup = sha256_avx_finup,
+ .descsize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-avx",
+ .cra_priority = 160,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+ }, {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = sha224_base_init,
+ .update = sha256_avx_update,
+ .final = sha256_avx_final,
+ .finup = sha256_avx_finup,
+ .descsize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-avx",
+ .cra_priority = 160,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+ } };
+
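Registering "sha256" and "sha224" under driver names like sha256-avx lets the crypto core transparently select the highest cra_priority implementation; callers only ask for the generic name. An illustrative one-shot digest through the shash API (not part of the patch, names are example-only):

#include <linux/err.h>
#include <crypto/hash.h>

static int sha256_digest_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int ret;

	/* Resolves to the highest-priority registered "sha256". */
	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		ret = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return ret;
}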
+ static bool avx_usable(void)
{
- if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+ if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
if (cpu_has_avx)
pr_info("AVX detected but unusable.\n");
return false;
}
} };
+ static int register_sha512_ssse3(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_SSSE3))
+ return crypto_register_shashes(sha512_ssse3_algs,
+ ARRAY_SIZE(sha512_ssse3_algs));
+ return 0;
+ }
+
+ static void unregister_sha512_ssse3(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_SSSE3))
+ crypto_unregister_shashes(sha512_ssse3_algs,
+ ARRAY_SIZE(sha512_ssse3_algs));
+ }
+
#ifdef CONFIG_AS_AVX
- static bool __init avx_usable(void)
+ asmlinkage void sha512_transform_avx(u64 *digest, const char *data,
+ u64 rounds);
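Note that the sha512 transform takes a u64 digest pointer and a u64 round count, unlike the u32-based sha1 variant above. Its update wrapper presumably mirrors the sha256 ones; a sketch under that assumption (sha512_update is assumed to be the file-local helper analogous to sha256_update):

static int sha512_avx_update(struct shash_desc *desc, const u8 *data,
			     unsigned int len)
{
	return sha512_update(desc, data, len, sha512_transform_avx);
}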
+ static bool avx_usable(void)
{
- if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+ if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
if (cpu_has_avx)
pr_info("AVX detected but unusable.\n");
return false;
}

return engine->int_mask;
}
+ static inline bool mv_cesa_mac_op_is_first_frag(const struct mv_cesa_op_ctx *op)
+ {
+ return (mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) ==
+ CESA_SA_DESC_CFG_FIRST_FRAG;
+ }
+
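A typical consumer of this predicate flips the op from first-frag to mid-frag once a fragment has been emitted; a sketch assuming the mv_cesa_update_op_cfg() helper from the same header (the wrapper name is hypothetical):

/* Hypothetical wrapper: demote FIRST_FRAG to MID_FRAG after the
 * first fragment has been processed. */
static inline void mv_cesa_demote_first_frag(struct mv_cesa_op_ctx *op)
{
	if (mv_cesa_mac_op_is_first_frag(op))
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);
}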
int mv_cesa_queue_req(struct crypto_async_request *req);
+/*
+ * Helper function that indicates whether a crypto request needs to be
+ * cleaned up after being enqueued using mv_cesa_queue_req().
+ */
+static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
+ int ret)
+{
+ /*
+ * The queue still had some space, the request was queued
+ * normally, so there's no need to clean it up.
+ */
+ if (ret == -EINPROGRESS)
+ return false;
+
+ /*
+ * The queue had no space left, but since the request is
+ * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
+ * the backlog and will be processed later. There's no need to
+ * clean it up.
+ */
+ if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return false;
+
+ /* Request wasn't queued, we need to clean it up */
+ return true;
+}
+
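The intended call pattern pairs the helper with mv_cesa_queue_req() so that only genuinely rejected requests are torn down; a sketch (mv_cesa_ablkcipher_cleanup() is an assumed driver-local teardown helper):

static int mv_cesa_enqueue_example(struct ablkcipher_request *req)
{
	int ret;

	ret = mv_cesa_queue_req(&req->base);

	/* Release DMA resources only if the request was not accepted,
	 * i.e. neither queued (-EINPROGRESS) nor backlogged (-EBUSY
	 * with CRYPTO_TFM_REQ_MAY_BACKLOG set). */
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}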
/* TDMA functions */
static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,