Diffstat (limited to 'bsp/meta-freescale/recipes-connectivity/openssl/openssl-qoriq/0002-eng_devcrypto-add-support-for-TLS1.2-algorithms-offl.patch')
-rw-r--r--  bsp/meta-freescale/recipes-connectivity/openssl/openssl-qoriq/0002-eng_devcrypto-add-support-for-TLS1.2-algorithms-offl.patch  285
1 file changed, 285 insertions, 0 deletions
diff --git a/bsp/meta-freescale/recipes-connectivity/openssl/openssl-qoriq/0002-eng_devcrypto-add-support-for-TLS1.2-algorithms-offl.patch b/bsp/meta-freescale/recipes-connectivity/openssl/openssl-qoriq/0002-eng_devcrypto-add-support-for-TLS1.2-algorithms-offl.patch
new file mode 100644
index 00000000..b12af56e
--- /dev/null
+++ b/bsp/meta-freescale/recipes-connectivity/openssl/openssl-qoriq/0002-eng_devcrypto-add-support-for-TLS1.2-algorithms-offl.patch
@@ -0,0 +1,285 @@
+From db9d8be9d0d81bdb2ddb78f8616243593a3d24c5 Mon Sep 17 00:00:00 2001
+From: Pankaj Gupta <pankaj.gupta@nxp.com>
+Date: Fri, 10 Jan 2020 15:38:38 +0530
+Subject: [PATCH 2/2] eng_devcrypto: add support for TLS1.2 algorithms offload
+
+ - aes-128-cbc-hmac-sha256
+ - aes-256-cbc-hmac-sha256
+
+Also enabled offload support for the TLS1.1 algorithms
+
+ - aes-128-cbc-hmac-sha1
+ - aes-256-cbc-hmac-sha1
+
+Requires the TLS patches on cryptodev and TLS algorithm support in the
+Linux kernel driver.
+
+Fix: Remove support for TLS1.0.
+
+Signed-off-by: Pankaj Gupta <pankaj.gupta@nxp.com>
+Signed-off-by: Arun Pathak <arun.pathak@nxp.com>
+---
+ crypto/engine/eng_devcrypto.c | 133 +++++++++++++++++++++++-----------
+ 1 file changed, 90 insertions(+), 43 deletions(-)
+
+diff --git a/crypto/engine/eng_devcrypto.c b/crypto/engine/eng_devcrypto.c
+index 727a660e75..be63f65e04 100644
+--- a/crypto/engine/eng_devcrypto.c
++++ b/crypto/engine/eng_devcrypto.c
+@@ -25,6 +25,7 @@
+ #include "crypto/engine.h"
+
+ /* #define ENGINE_DEVCRYPTO_DEBUG */
++#define TLS1_1_VERSION 0x0302
+
+ #if CRYPTO_ALGORITHM_MIN < CRYPTO_ALGORITHM_MAX
+ # define CHECK_BSD_STYLE_MACROS
+@@ -67,6 +68,7 @@ struct cipher_ctx {
+ /* to handle ctr mode being a stream cipher */
+ unsigned char partial[EVP_MAX_BLOCK_LENGTH];
+ unsigned int blocksize, num;
++ unsigned int tls_ver;
+ };
+
+ static const struct cipher_data_st {
+@@ -92,11 +94,17 @@ static const struct cipher_data_st {
+ { NID_aes_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC, 0 },
+ { NID_aes_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC, 0 },
+ { NID_aes_128_cbc_hmac_sha1, 16, 16, 16,
+- EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
+- CRYPTO_TLS10_AES_CBC_HMAC_SHA1, 20 },
++ EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
++ CRYPTO_TLS11_AES_CBC_HMAC_SHA1, 20 },
+ { NID_aes_256_cbc_hmac_sha1, 16, 32, 16,
+- EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
+- CRYPTO_TLS10_AES_CBC_HMAC_SHA1, 20 },
++ EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
++ CRYPTO_TLS11_AES_CBC_HMAC_SHA1, 20 },
++ { NID_aes_128_cbc_hmac_sha256, 16, 16, 16,
++ EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
++ CRYPTO_TLS12_AES_CBC_HMAC_SHA256, 32 },
++ { NID_aes_256_cbc_hmac_sha256, 16, 32, 16,
++ EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
++ CRYPTO_TLS12_AES_CBC_HMAC_SHA256, 32 },
+ #ifndef OPENSSL_NO_RC4
+ { NID_rc4, 1, 16, 0, EVP_CIPH_STREAM_CIPHER, CRYPTO_ARC4, 0 },
+ #endif
+@@ -107,9 +115,9 @@ static const struct cipher_data_st {
+ #endif
+ #if 0 /* Not yet supported */
+ { NID_aes_128_xts, 16, 128 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS,
+- 0 },
++ 0 },
+ { NID_aes_256_xts, 16, 256 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS,
+- 0 },
++ 0 },
+ #endif
+ #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_ECB)
+ { NID_aes_128_ecb, 16, 128 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB, 0 },
+@@ -166,7 +174,7 @@ static const struct cipher_data_st *get_cipher_data(int nid)
+ * with both the crypto and hmac keys.
+ */
+ static int cryptodev_init_aead_key(EVP_CIPHER_CTX *ctx,
+- const unsigned char *key, const unsigned char *iv, int enc)
++ const unsigned char *key, const unsigned char *iv, int enc)
+ {
+ struct cipher_ctx *state = EVP_CIPHER_CTX_get_cipher_data(ctx);
+ struct session_op *sess = &state->sess;
+@@ -212,10 +220,29 @@ static int cryptodev_aead_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+
+ memset(&cryp, 0, sizeof(cryp));
+
++ if (EVP_CIPHER_CTX_iv_length(ctx) > 0) {
++ if (!EVP_CIPHER_CTX_encrypting(ctx)) {
++ iiv = in + len - EVP_CIPHER_CTX_iv_length(ctx);
++ memcpy(save_iv, iiv, EVP_CIPHER_CTX_iv_length(ctx));
++
++ if (state->tls_ver >= TLS1_1_VERSION) {
++ memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), in,
++ EVP_CIPHER_CTX_iv_length(ctx));
++ in += EVP_CIPHER_CTX_iv_length(ctx);
++ out += EVP_CIPHER_CTX_iv_length(ctx);
++ len -= EVP_CIPHER_CTX_iv_length(ctx);
++ }
++ }
++ cryp.iv = (void *) EVP_CIPHER_CTX_iv(ctx);
++ } else
++ cryp.iv = NULL;
++
+ /* TODO: make a seamless integration with cryptodev flags */
+ switch (EVP_CIPHER_CTX_nid(ctx)) {
+ case NID_aes_128_cbc_hmac_sha1:
+ case NID_aes_256_cbc_hmac_sha1:
++ case NID_aes_128_cbc_hmac_sha256:
++ case NID_aes_256_cbc_hmac_sha256:
+ cryp.flags = COP_FLAG_AEAD_TLS_TYPE;
+ }
+ cryp.ses = sess->ses;
+@@ -227,15 +254,6 @@ static int cryptodev_aead_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+
+ cryp.op = EVP_CIPHER_CTX_encrypting(ctx) ? COP_ENCRYPT : COP_DECRYPT;
+
+- if (EVP_CIPHER_CTX_iv_length(ctx) > 0) {
+- cryp.iv = (void *) EVP_CIPHER_CTX_iv(ctx);
+- if (!EVP_CIPHER_CTX_encrypting(ctx)) {
+- iiv = in + len - EVP_CIPHER_CTX_iv_length(ctx);
+- memcpy(save_iv, iiv, EVP_CIPHER_CTX_iv_length(ctx));
+- }
+- } else
+- cryp.iv = NULL;
+-
+ if (ioctl(cfd, CIOCAUTHCRYPT, &cryp) == -1) {
+ /*
+ * XXX need better errror handling this can fail for a number of
+@@ -262,7 +280,7 @@ static int cryptodev_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type,
+ switch (type) {
+ case EVP_CTRL_AEAD_SET_MAC_KEY:
+ {
+- /* TODO: what happens with hmac keys larger than 64 bytes? */
++ /* TODO: what happens with hmac keys larger than 64 bytes? */
+ struct cipher_ctx *state =
+ EVP_CIPHER_CTX_get_cipher_data(ctx);
+ struct session_op *sess = &state->sess;
+@@ -282,27 +300,52 @@ static int cryptodev_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type,
+ EVP_CIPHER_CTX_get_cipher_data(ctx);
+ unsigned char *p = ptr;
+ unsigned int cryptlen = p[arg - 2] << 8 | p[arg - 1];
+- unsigned int maclen, padlen;
+- unsigned int bs = EVP_CIPHER_CTX_block_size(ctx);
++ unsigned int maclen;
++ unsigned int blocksize = EVP_CIPHER_CTX_block_size(ctx);
++ int ret;
+
++ state->tls_ver = p[arg - 4] << 8 | p[arg - 3];
+ state->aad = ptr;
+ state->aad_len = arg;
+- state->len = cryptlen;
+
+ /* TODO: this should be an extension of EVP_CIPHER struct */
+ switch (EVP_CIPHER_CTX_nid(ctx)) {
+ case NID_aes_128_cbc_hmac_sha1:
+ case NID_aes_256_cbc_hmac_sha1:
+ maclen = SHA_DIGEST_LENGTH;
++ break;
++ case NID_aes_128_cbc_hmac_sha256:
++ case NID_aes_256_cbc_hmac_sha256:
++ maclen = SHA256_DIGEST_LENGTH;
++ break;
++ default:
++ /*
++ * Only above 4 supported NIDs are used to enter to this
++ * function. If any other NID reaches this function,
++ * there's a grave coding error further down.
++ */
++ assert("Code that never should be reached" == NULL);
++ return -1;
+ }
+
+ /* space required for encryption (not only TLS padding) */
+- padlen = maclen;
+ if (EVP_CIPHER_CTX_encrypting(ctx)) {
+- cryptlen += maclen;
+- padlen += bs - (cryptlen % bs);
++ if (state->tls_ver >= TLS1_1_VERSION) {
++ p[arg - 2] = (cryptlen - blocksize) >> 8;
++ p[arg - 1] = (cryptlen - blocksize);
++ }
++ ret = (int)(((cryptlen + maclen +
++ blocksize) & -blocksize) - cryptlen);
++ } else {
++ if (state->tls_ver >= TLS1_1_VERSION) {
++ cryptlen -= blocksize;
++ p[arg - 2] = cryptlen >> 8;
++ p[arg - 1] = cryptlen;
++ }
++ ret = maclen;
+ }
+- return padlen;
++ state->len = cryptlen;
++ return ret;
+ }
+ default:
+ return -1;
+@@ -510,11 +553,11 @@ static int cipher_cleanup(EVP_CIPHER_CTX *ctx)
+ static int known_cipher_nids[OSSL_NELEM(cipher_data)];
+ static int known_cipher_nids_amount = -1; /* -1 indicates not yet initialised */
+ static EVP_CIPHER *known_cipher_methods[OSSL_NELEM(cipher_data)] = { NULL, };
+-int (*init) (EVP_CIPHER_CTX *ctx, const unsigned char *key,
+- const unsigned char *iv, int enc);
+-int (*do_cipher) (EVP_CIPHER_CTX *ctx, unsigned char *out,
+- const unsigned char *in, size_t inl);
+-int (*ctrl) (EVP_CIPHER_CTX *, int type, int arg, void *ptr);
++int (*init)(EVP_CIPHER_CTX *ctx, const unsigned char *key,
++ const unsigned char *iv, int enc);
++int (*do_cipher)(EVP_CIPHER_CTX *ctx, unsigned char *out,
++ const unsigned char *in, size_t inl);
++int (*ctrl)(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr);
+
+ static void prepare_cipher_methods(void)
+ {
+@@ -543,26 +586,28 @@ static void prepare_cipher_methods(void)
+ */
+ sess.cipher = cipher_data[i].devcryptoid;
+ sess.keylen = cipher_data[i].keylen;
+- sess.mackeylen = cipher_data[i].mackeylen;
++ sess.mackeylen = cipher_data[i].mackeylen;
+
+ cipher_mode = cipher_data[i].flags & EVP_CIPH_MODE;
+
+- do_cipher = (cipher_mode == EVP_CIPH_CTR_MODE ?
++ do_cipher = (cipher_mode == EVP_CIPH_CTR_MODE ?
+ ctr_do_cipher :
+ cipher_do_cipher);
+- if (cipher_data[i].nid == NID_aes_128_cbc_hmac_sha1
+- || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha1) {
+- init = cryptodev_init_aead_key;
+- do_cipher = cryptodev_aead_cipher;
+- ctrl = cryptodev_cbc_hmac_sha1_ctrl;
+- flags = cipher_data[i].flags;
+- }
++ if (cipher_data[i].nid == NID_aes_128_cbc_hmac_sha1
++ || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha1
++ || cipher_data[i].nid == NID_aes_128_cbc_hmac_sha256
++ || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha256) {
++ init = cryptodev_init_aead_key;
++ do_cipher = cryptodev_aead_cipher;
++ ctrl = cryptodev_cbc_hmac_sha1_ctrl;
++ flags = cipher_data[i].flags;
++ }
+
+ if (ioctl(cfd, CIOCGSESSION, &sess) < 0
+ || ioctl(cfd, CIOCFSESSION, &sess.ses) < 0)
+ continue;
+
+- if ((known_cipher_methods[i] =
++ if ((known_cipher_methods[i] =
+ EVP_CIPHER_meth_new(cipher_data[i].nid,
+ cipher_mode == EVP_CIPH_CTR_MODE ? 1 :
+ cipher_data[i].blocksize,
+@@ -574,7 +619,7 @@ static void prepare_cipher_methods(void)
+ || !EVP_CIPHER_meth_set_init(known_cipher_methods[i], init)
+ || !EVP_CIPHER_meth_set_do_cipher(known_cipher_methods[i],
+ do_cipher)
+- /* AEAD Support to be added. */
++ /* AEAD Support to be added. */
+ || !EVP_CIPHER_meth_set_ctrl(known_cipher_methods[i], ctrl)
+ || !EVP_CIPHER_meth_set_cleanup(known_cipher_methods[i],
+ cipher_cleanup)
+@@ -587,9 +632,11 @@ static void prepare_cipher_methods(void)
+ cipher_data[i].nid;
+ }
+
+- if (cipher_data[i].nid == NID_aes_128_cbc_hmac_sha1
+- || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha1)
+- EVP_add_cipher(known_cipher_methods[i]);
++ if (cipher_data[i].nid == NID_aes_128_cbc_hmac_sha1
++ || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha1
++ || cipher_data[i].nid == NID_aes_128_cbc_hmac_sha256
++ || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha256)
++ EVP_add_cipher(known_cipher_methods[i]);
+ }
+ }
+
+--
+2.17.1
+
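A quick way to confirm the new offloads once this patch is applied is to ask the devcrypto engine for the stitched cipher NIDs it registered. The probe below is a minimal, hypothetical sketch and is not part of the patch; it assumes an OpenSSL 1.1.x openssl-qoriq build with the devcrypto engine enabled, a cryptodev-linux module that provides the CRYPTO_TLS11/TLS12 session types, and /dev/crypto present on the target.

/*
 * Hypothetical probe (not part of the patch): check whether the devcrypto
 * engine exposes the stitched TLS1.1/TLS1.2 ciphers added above.
 */
#include <stdio.h>
#include <openssl/engine.h>
#include <openssl/evp.h>
#include <openssl/objects.h>

int main(void)
{
    static const int nids[] = {
        NID_aes_128_cbc_hmac_sha1, NID_aes_256_cbc_hmac_sha1,
        NID_aes_128_cbc_hmac_sha256, NID_aes_256_cbc_hmac_sha256,
    };
    ENGINE_load_builtin_engines();
    ENGINE *e = ENGINE_by_id("devcrypto");
    if (e == NULL || !ENGINE_init(e)) {
        fprintf(stderr, "devcrypto engine not available\n");
        return 1;
    }
    for (size_t i = 0; i < sizeof(nids) / sizeof(nids[0]); i++) {
        /* non-NULL only if prepare_cipher_methods() registered the cipher */
        const EVP_CIPHER *c = ENGINE_get_cipher(e, nids[i]);
        printf("%-28s %s\n", OBJ_nid2sn(nids[i]),
               c != NULL ? "offloaded" : "not offered");
    }
    ENGINE_finish(e);
    ENGINE_free(e);
    return 0;
}

On the target, `openssl engine -c devcrypto` should list the same ciphers; if one is missing, the likely cause is that the CIOCGSESSION probe in prepare_cipher_methods() failed because the kernel driver lacks the TLS algorithm support mentioned in the commit message.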