Diffstat (limited to 'roms/u-boot/drivers/crypto')
-rw-r--r--  roms/u-boot/drivers/crypto/Kconfig                      |   5
-rw-r--r--  roms/u-boot/drivers/crypto/Makefile                     |   8
-rw-r--r--  roms/u-boot/drivers/crypto/ace_sha.c                    | 185
-rw-r--r--  roms/u-boot/drivers/crypto/ace_sha.h                    | 313
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/Kconfig                  |  67
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/Makefile                 |  10
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/desc.h                   | 750
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/desc_constr.h            | 330
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/error.c                  | 258
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/fsl_blob.c               | 203
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/fsl_hash.c               | 233
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/fsl_hash.h               |  33
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/fsl_mfgprot.c            | 160
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/fsl_rsa.c                |  60
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/jobdesc.c                | 336
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/jobdesc.h                |  52
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/jr.c                     | 745
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/jr.h                     | 108
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/rng.c                    |  88
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/rsa_caam.h               |  27
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/sec.c                    | 189
-rw-r--r--  roms/u-boot/drivers/crypto/fsl/type.h                   |  16
-rw-r--r--  roms/u-boot/drivers/crypto/rsa_mod_exp/Kconfig          |   5
-rw-r--r--  roms/u-boot/drivers/crypto/rsa_mod_exp/Makefile         |   6
-rw-r--r--  roms/u-boot/drivers/crypto/rsa_mod_exp/mod_exp_sw.c     |  40
-rw-r--r--  roms/u-boot/drivers/crypto/rsa_mod_exp/mod_exp_uclass.c |  44
26 files changed, 4271 insertions(+), 0 deletions(-)
diff --git a/roms/u-boot/drivers/crypto/Kconfig b/roms/u-boot/drivers/crypto/Kconfig
new file mode 100644
index 000000000..1ea116be7
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/Kconfig
@@ -0,0 +1,5 @@
+menu "Hardware crypto devices"
+
+source drivers/crypto/fsl/Kconfig
+
+endmenu
diff --git a/roms/u-boot/drivers/crypto/Makefile b/roms/u-boot/drivers/crypto/Makefile
new file mode 100644
index 000000000..efbd1d3fc
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (c) 2013 Samsung Electronics Co., Ltd.
+# http://www.samsung.com
+
+obj-$(CONFIG_EXYNOS_ACE_SHA) += ace_sha.o
+obj-y += rsa_mod_exp/
+obj-y += fsl/
diff --git a/roms/u-boot/drivers/crypto/ace_sha.c b/roms/u-boot/drivers/crypto/ace_sha.c
new file mode 100644
index 000000000..261d3efe8
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/ace_sha.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Advanced Crypto Engine - SHA Firmware
+ * Copyright (c) 2012 Samsung Electronics
+ */
+#include <common.h>
+#include "ace_sha.h"
+#include <log.h>
+#include <rand.h>
+
+#ifdef CONFIG_SHA_HW_ACCEL
+#include <u-boot/sha256.h>
+#include <u-boot/sha1.h>
+#include <linux/errno.h>
+
+/* SHA1 value for the message of zero length */
+static const unsigned char sha1_digest_emptymsg[SHA1_SUM_LEN] = {
+ 0xDA, 0x39, 0xA3, 0xEE, 0x5E, 0x6B, 0x4B, 0x0D,
+ 0x32, 0x55, 0xBF, 0xFF, 0x95, 0x60, 0x18, 0x90,
+ 0xAF, 0xD8, 0x07, 0x09};
+
+/* SHA256 value for the message of zero length */
+static const unsigned char sha256_digest_emptymsg[SHA256_SUM_LEN] = {
+ 0xE3, 0xB0, 0xC4, 0x42, 0x98, 0xFC, 0x1C, 0x14,
+ 0x9A, 0xFB, 0xF4, 0xC8, 0x99, 0x6F, 0xB9, 0x24,
+ 0x27, 0xAE, 0x41, 0xE4, 0x64, 0x9B, 0x93, 0x4C,
+ 0xA4, 0x95, 0x99, 0x1B, 0x78, 0x52, 0xB8, 0x55};
+
+int ace_sha_hash_digest(const unsigned char *pbuf, unsigned int buf_len,
+ unsigned char *pout, unsigned int hash_type)
+{
+ unsigned int i, reg, len;
+ unsigned int *pdigest;
+ struct exynos_ace_sfr *ace_sha_reg =
+ (struct exynos_ace_sfr *)samsung_get_base_ace_sfr();
+
+ if (buf_len == 0) {
+ /* ACE H/W cannot compute hash value for empty string */
+ if (hash_type == ACE_SHA_TYPE_SHA1)
+ memcpy(pout, sha1_digest_emptymsg, SHA1_SUM_LEN);
+ else
+ memcpy(pout, sha256_digest_emptymsg, SHA256_SUM_LEN);
+ return 0;
+ }
+
+ /* Flush HRDMA */
+ writel(ACE_FC_HRDMACFLUSH_ON, &ace_sha_reg->fc_hrdmac);
+ writel(ACE_FC_HRDMACFLUSH_OFF, &ace_sha_reg->fc_hrdmac);
+
+ /* Set byte swap of data in */
+ writel(ACE_HASH_SWAPDI_ON | ACE_HASH_SWAPDO_ON | ACE_HASH_SWAPIV_ON,
+ &ace_sha_reg->hash_byteswap);
+
+ /* Select Hash input mux as external source */
+ reg = readl(&ace_sha_reg->fc_fifoctrl);
+ reg = (reg & ~ACE_FC_SELHASH_MASK) | ACE_FC_SELHASH_EXOUT;
+ writel(reg, &ace_sha_reg->fc_fifoctrl);
+
+ /* Set Hash as SHA1 or SHA256 and start Hash engine */
+ reg = (hash_type == ACE_SHA_TYPE_SHA1) ?
+ ACE_HASH_ENGSEL_SHA1HASH : ACE_HASH_ENGSEL_SHA256HASH;
+ reg |= ACE_HASH_STARTBIT_ON;
+ writel(reg, &ace_sha_reg->hash_control);
+
+ /* Enable FIFO mode */
+ writel(ACE_HASH_FIFO_ON, &ace_sha_reg->hash_fifo_mode);
+
+ /* Set message length */
+ writel(buf_len, &ace_sha_reg->hash_msgsize_low);
+ writel(0, &ace_sha_reg->hash_msgsize_high);
+
+ /* Set HRDMA */
+ writel((unsigned int)pbuf, &ace_sha_reg->fc_hrdmas);
+ writel(buf_len, &ace_sha_reg->fc_hrdmal);
+
+ while ((readl(&ace_sha_reg->hash_status) & ACE_HASH_MSGDONE_MASK) ==
+ ACE_HASH_MSGDONE_OFF) {
+ /*
+ * PRNG error bit goes HIGH if a PRNG request occurs without
+ * a complete seed setup. We are using this bit to check h/w
+ * fault because proper setup is not expected in that case.
+ */
+ if ((readl(&ace_sha_reg->hash_status)
+ & ACE_HASH_PRNGERROR_MASK) == ACE_HASH_PRNGERROR_ON)
+ return -EBUSY;
+ }
+
+ /* Clear MSG_DONE bit */
+ writel(ACE_HASH_MSGDONE_ON, &ace_sha_reg->hash_status);
+
+ /* Read hash result */
+ pdigest = (unsigned int *)pout;
+ len = (hash_type == ACE_SHA_TYPE_SHA1) ? SHA1_SUM_LEN : SHA256_SUM_LEN;
+
+ for (i = 0; i < len / 4; i++)
+ pdigest[i] = readl(&ace_sha_reg->hash_result[i]);
+
+ /* Clear HRDMA pending bit */
+ writel(ACE_FC_HRDMA, &ace_sha_reg->fc_intpend);
+
+ return 0;
+}
+
+void hw_sha256(const unsigned char *pbuf, unsigned int buf_len,
+ unsigned char *pout, unsigned int chunk_size)
+{
+ if (ace_sha_hash_digest(pbuf, buf_len, pout, ACE_SHA_TYPE_SHA256))
+ debug("ACE was not setup properly or it is faulty\n");
+}
+
+void hw_sha1(const unsigned char *pbuf, unsigned int buf_len,
+ unsigned char *pout, unsigned int chunk_size)
+{
+ if (ace_sha_hash_digest(pbuf, buf_len, pout, ACE_SHA_TYPE_SHA1))
+ debug("ACE was not setup properly or it is faulty\n");
+}
+#endif /* CONFIG_SHA_HW_ACCEL */
+
+#ifdef CONFIG_LIB_HW_RAND
+static unsigned int seed_done;
+
+void srand(unsigned int seed)
+{
+ struct exynos_ace_sfr *reg =
+ (struct exynos_ace_sfr *)samsung_get_base_ace_sfr();
+ int i, status;
+
+ /* Seed data */
+ for (i = 0; i < ACE_HASH_PRNG_REG_NUM; i++)
+ writel(seed << i, &reg->hash_seed[i]);
+
+ /* Wait for seed setup done */
+ while (1) {
+ status = readl(&reg->hash_status);
+ if ((status & ACE_HASH_SEEDSETTING_MASK) ||
+ (status & ACE_HASH_PRNGERROR_MASK))
+ break;
+ }
+
+ seed_done = 1;
+}
+
+unsigned int rand(void)
+{
+ struct exynos_ace_sfr *reg =
+ (struct exynos_ace_sfr *)samsung_get_base_ace_sfr();
+ int i, status;
+ unsigned int seed = (unsigned int)&status;
+ unsigned int ret = 0;
+
+ if (!seed_done)
+ srand(seed);
+
+ /* Start PRNG */
+ writel(ACE_HASH_ENGSEL_PRNG | ACE_HASH_STARTBIT_ON, &reg->hash_control);
+
+ /* Wait for PRNG done */
+ while (1) {
+ status = readl(&reg->hash_status);
+ if (status & ACE_HASH_PRNGDONE_MASK)
+ break;
+ if (status & ACE_HASH_PRNGERROR_MASK) {
+ seed_done = 0;
+ return 0;
+ }
+ }
+
+ /* Clear Done IRQ */
+ writel(ACE_HASH_PRNGDONE_MASK, &reg->hash_status);
+
+ /* Read a PRNG result */
+ for (i = 0; i < ACE_HASH_PRNG_REG_NUM; i++)
+ ret += readl(&reg->hash_prng[i]);
+
+ seed_done = 0;
+ return ret;
+}
+
+unsigned int rand_r(unsigned int *seedp)
+{
+ srand(*seedp);
+
+ return rand();
+}
+#endif /* CONFIG_LIB_HW_RAND */
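As a usage illustration (not part of the patch), the hw_sha256() hook added above can be driven directly once CONFIG_SHA_HW_ACCEL is set for the board; the sketch below is minimal, and the wrapper name and output handling are assumptions made for the example.

/* Sketch only: exercise the hw_sha256() hook provided by ace_sha.c. */
#include <common.h>
#include <u-boot/sha256.h>

static void example_hw_sha256(const unsigned char *data, unsigned int len)
{
	unsigned char digest[SHA256_SUM_LEN];	/* 32 bytes, as the driver writes */
	unsigned int i;

	/* chunk_size is unused by this driver, so 0 is fine */
	hw_sha256(data, len, digest, 0);

	for (i = 0; i < SHA256_SUM_LEN; i++)
		printf("%02x", digest[i]);
	printf("\n");
}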
diff --git a/roms/u-boot/drivers/crypto/ace_sha.h b/roms/u-boot/drivers/crypto/ace_sha.h
new file mode 100644
index 000000000..a671b925b
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/ace_sha.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Header file for Advanced Crypto Engine - SFR definitions
+ *
+ * Copyright (c) 2012 Samsung Electronics
+ */
+
+#ifndef __ACE_SHA_H
+#define __ACE_SHA_H
+
+struct exynos_ace_sfr {
+ unsigned int fc_intstat; /* base + 0 */
+ unsigned int fc_intenset;
+ unsigned int fc_intenclr;
+ unsigned int fc_intpend;
+ unsigned int fc_fifostat;
+ unsigned int fc_fifoctrl;
+ unsigned int fc_global;
+ unsigned int res1;
+ unsigned int fc_brdmas;
+ unsigned int fc_brdmal;
+ unsigned int fc_brdmac;
+ unsigned int res2;
+ unsigned int fc_btdmas;
+ unsigned int fc_btdmal;
+ unsigned int fc_btdmac;
+ unsigned int res3;
+ unsigned int fc_hrdmas;
+ unsigned int fc_hrdmal;
+ unsigned int fc_hrdmac;
+ unsigned int res4;
+ unsigned int fc_pkdmas;
+ unsigned int fc_pkdmal;
+ unsigned int fc_pkdmac;
+ unsigned int fc_pkdmao;
+ unsigned char res5[0x1a0];
+
+ unsigned int aes_control; /* base + 0x200 */
+ unsigned int aes_status;
+ unsigned char res6[0x8];
+ unsigned int aes_in[4];
+ unsigned int aes_out[4];
+ unsigned int aes_iv[4];
+ unsigned int aes_cnt[4];
+ unsigned char res7[0x30];
+ unsigned int aes_key[8];
+ unsigned char res8[0x60];
+
+ unsigned int tdes_control; /* base + 0x300 */
+ unsigned int tdes_status;
+ unsigned char res9[0x8];
+ unsigned int tdes_key[6];
+ unsigned int tdes_iv[2];
+ unsigned int tdes_in[2];
+ unsigned int tdes_out[2];
+ unsigned char res10[0xc0];
+
+ unsigned int hash_control; /* base + 0x400 */
+ unsigned int hash_control2;
+ unsigned int hash_fifo_mode;
+ unsigned int hash_byteswap;
+ unsigned int hash_status;
+ unsigned char res11[0xc];
+ unsigned int hash_msgsize_low;
+ unsigned int hash_msgsize_high;
+ unsigned int hash_prelen_low;
+ unsigned int hash_prelen_high;
+ unsigned int hash_in[16];
+ unsigned int hash_key_in[16];
+ unsigned int hash_iv[8];
+ unsigned char res12[0x30];
+ unsigned int hash_result[8];
+ unsigned char res13[0x20];
+ unsigned int hash_seed[5];
+ unsigned char res14[12];
+ unsigned int hash_prng[5];
+ unsigned char res15[0x18c];
+
+ unsigned int pka_sfr[5]; /* base + 0x700 */
+};
+
+/* ACE_FC_INT */
+#define ACE_FC_PKDMA (1 << 0)
+#define ACE_FC_HRDMA (1 << 1)
+#define ACE_FC_BTDMA (1 << 2)
+#define ACE_FC_BRDMA (1 << 3)
+#define ACE_FC_PRNG_ERROR (1 << 4)
+#define ACE_FC_MSG_DONE (1 << 5)
+#define ACE_FC_PRNG_DONE (1 << 6)
+#define ACE_FC_PARTIAL_DONE (1 << 7)
+
+/* ACE_FC_FIFOSTAT */
+#define ACE_FC_PKFIFO_EMPTY (1 << 0)
+#define ACE_FC_PKFIFO_FULL (1 << 1)
+#define ACE_FC_HRFIFO_EMPTY (1 << 2)
+#define ACE_FC_HRFIFO_FULL (1 << 3)
+#define ACE_FC_BTFIFO_EMPTY (1 << 4)
+#define ACE_FC_BTFIFO_FULL (1 << 5)
+#define ACE_FC_BRFIFO_EMPTY (1 << 6)
+#define ACE_FC_BRFIFO_FULL (1 << 7)
+
+/* ACE_FC_FIFOCTRL */
+#define ACE_FC_SELHASH_MASK (3 << 0)
+#define ACE_FC_SELHASH_EXOUT (0 << 0) /* independent source */
+#define ACE_FC_SELHASH_BCIN (1 << 0) /* blk cipher input */
+#define ACE_FC_SELHASH_BCOUT (2 << 0) /* blk cipher output */
+#define ACE_FC_SELBC_MASK (1 << 2)
+#define ACE_FC_SELBC_AES (0 << 2)
+#define ACE_FC_SELBC_DES (1 << 2)
+
+/* ACE_FC_GLOBAL */
+#define ACE_FC_SSS_RESET (1 << 0)
+#define ACE_FC_DMA_RESET (1 << 1)
+#define ACE_FC_AES_RESET (1 << 2)
+#define ACE_FC_DES_RESET (1 << 3)
+#define ACE_FC_HASH_RESET (1 << 4)
+#define ACE_FC_AXI_ENDIAN_MASK (3 << 6)
+#define ACE_FC_AXI_ENDIAN_LE (0 << 6)
+#define ACE_FC_AXI_ENDIAN_BIBE (1 << 6)
+#define ACE_FC_AXI_ENDIAN_WIBE (2 << 6)
+
+/* Feed control - BRDMA control */
+#define ACE_FC_BRDMACFLUSH_OFF (0 << 0)
+#define ACE_FC_BRDMACFLUSH_ON (1 << 0)
+#define ACE_FC_BRDMACSWAP_ON (1 << 1)
+#define ACE_FC_BRDMACARPROT_MASK (0x7 << 2)
+#define ACE_FC_BRDMACARPROT_OFS 2
+#define ACE_FC_BRDMACARCACHE_MASK (0xf << 5)
+#define ACE_FC_BRDMACARCACHE_OFS 5
+
+/* Feed control - BTDMA control */
+#define ACE_FC_BTDMACFLUSH_OFF (0 << 0)
+#define ACE_FC_BTDMACFLUSH_ON (1 << 0)
+#define ACE_FC_BTDMACSWAP_ON (1 << 1)
+#define ACE_FC_BTDMACAWPROT_MASK (0x7 << 2)
+#define ACE_FC_BTDMACAWPROT_OFS 2
+#define ACE_FC_BTDMACAWCACHE_MASK (0xf << 5)
+#define ACE_FC_BTDMACAWCACHE_OFS 5
+
+/* Feed control - HRDMA control */
+#define ACE_FC_HRDMACFLUSH_OFF (0 << 0)
+#define ACE_FC_HRDMACFLUSH_ON (1 << 0)
+#define ACE_FC_HRDMACSWAP_ON (1 << 1)
+#define ACE_FC_HRDMACARPROT_MASK (0x7 << 2)
+#define ACE_FC_HRDMACARPROT_OFS 2
+#define ACE_FC_HRDMACARCACHE_MASK (0xf << 5)
+#define ACE_FC_HRDMACARCACHE_OFS 5
+
+/* Feed control - PKDMA control */
+#define ACE_FC_PKDMACBYTESWAP_ON (1 << 3)
+#define ACE_FC_PKDMACDESEND_ON (1 << 2)
+#define ACE_FC_PKDMACTRANSMIT_ON (1 << 1)
+#define ACE_FC_PKDMACFLUSH_ON (1 << 0)
+
+/* Feed control - PKDMA offset */
+#define ACE_FC_SRAMOFFSET_MASK 0xfff
+
+/* AES control */
+#define ACE_AES_MODE_MASK (1 << 0)
+#define ACE_AES_MODE_ENC (0 << 0)
+#define ACE_AES_MODE_DEC (1 << 0)
+#define ACE_AES_OPERMODE_MASK (3 << 1)
+#define ACE_AES_OPERMODE_ECB (0 << 1)
+#define ACE_AES_OPERMODE_CBC (1 << 1)
+#define ACE_AES_OPERMODE_CTR (2 << 1)
+#define ACE_AES_FIFO_MASK (1 << 3)
+#define ACE_AES_FIFO_OFF (0 << 3) /* CPU mode */
+#define ACE_AES_FIFO_ON (1 << 3) /* FIFO mode */
+#define ACE_AES_KEYSIZE_MASK (3 << 4)
+#define ACE_AES_KEYSIZE_128 (0 << 4)
+#define ACE_AES_KEYSIZE_192 (1 << 4)
+#define ACE_AES_KEYSIZE_256 (2 << 4)
+#define ACE_AES_KEYCNGMODE_MASK (1 << 6)
+#define ACE_AES_KEYCNGMODE_OFF (0 << 6)
+#define ACE_AES_KEYCNGMODE_ON (1 << 6)
+#define ACE_AES_SWAP_MASK (0x1f << 7)
+#define ACE_AES_SWAPKEY_OFF (0 << 7)
+#define ACE_AES_SWAPKEY_ON (1 << 7)
+#define ACE_AES_SWAPCNT_OFF (0 << 8)
+#define ACE_AES_SWAPCNT_ON (1 << 8)
+#define ACE_AES_SWAPIV_OFF (0 << 9)
+#define ACE_AES_SWAPIV_ON (1 << 9)
+#define ACE_AES_SWAPDO_OFF (0 << 10)
+#define ACE_AES_SWAPDO_ON (1 << 10)
+#define ACE_AES_SWAPDI_OFF (0 << 11)
+#define ACE_AES_SWAPDI_ON (1 << 11)
+#define ACE_AES_COUNTERSIZE_MASK (3 << 12)
+#define ACE_AES_COUNTERSIZE_128 (0 << 12)
+#define ACE_AES_COUNTERSIZE_64 (1 << 12)
+#define ACE_AES_COUNTERSIZE_32 (2 << 12)
+#define ACE_AES_COUNTERSIZE_16 (3 << 12)
+
+/* AES status */
+#define ACE_AES_OUTRDY_MASK (1 << 0)
+#define ACE_AES_OUTRDY_OFF (0 << 0)
+#define ACE_AES_OUTRDY_ON (1 << 0)
+#define ACE_AES_INRDY_MASK (1 << 1)
+#define ACE_AES_INRDY_OFF (0 << 1)
+#define ACE_AES_INRDY_ON (1 << 1)
+#define ACE_AES_BUSY_MASK (1 << 2)
+#define ACE_AES_BUSY_OFF (0 << 2)
+#define ACE_AES_BUSY_ON (1 << 2)
+
+/* TDES control */
+#define ACE_TDES_MODE_MASK (1 << 0)
+#define ACE_TDES_MODE_ENC (0 << 0)
+#define ACE_TDES_MODE_DEC (1 << 0)
+#define ACE_TDES_OPERMODE_MASK (1 << 1)
+#define ACE_TDES_OPERMODE_ECB (0 << 1)
+#define ACE_TDES_OPERMODE_CBC (1 << 1)
+#define ACE_TDES_SEL_MASK (3 << 3)
+#define ACE_TDES_SEL_DES (0 << 3)
+#define ACE_TDES_SEL_TDESEDE (1 << 3) /* TDES EDE mode */
+#define ACE_TDES_SEL_TDESEEE (3 << 3) /* TDES EEE mode */
+#define ACE_TDES_FIFO_MASK (1 << 5)
+#define ACE_TDES_FIFO_OFF (0 << 5) /* CPU mode */
+#define ACE_TDES_FIFO_ON (1 << 5) /* FIFO mode */
+#define ACE_TDES_SWAP_MASK (0xf << 6)
+#define ACE_TDES_SWAPKEY_OFF (0 << 6)
+#define ACE_TDES_SWAPKEY_ON (1 << 6)
+#define ACE_TDES_SWAPIV_OFF (0 << 7)
+#define ACE_TDES_SWAPIV_ON (1 << 7)
+#define ACE_TDES_SWAPDO_OFF (0 << 8)
+#define ACE_TDES_SWAPDO_ON (1 << 8)
+#define ACE_TDES_SWAPDI_OFF (0 << 9)
+#define ACE_TDES_SWAPDI_ON (1 << 9)
+
+/* TDES status */
+#define ACE_TDES_OUTRDY_MASK (1 << 0)
+#define ACE_TDES_OUTRDY_OFF (0 << 0)
+#define ACE_TDES_OUTRDY_ON (1 << 0)
+#define ACE_TDES_INRDY_MASK (1 << 1)
+#define ACE_TDES_INRDY_OFF (0 << 1)
+#define ACE_TDES_INRDY_ON (1 << 1)
+#define ACE_TDES_BUSY_MASK (1 << 2)
+#define ACE_TDES_BUSY_OFF (0 << 2)
+#define ACE_TDES_BUSY_ON (1 << 2)
+
+/* Hash control */
+#define ACE_HASH_ENGSEL_MASK (0xf << 0)
+#define ACE_HASH_ENGSEL_SHA1HASH (0x0 << 0)
+#define ACE_HASH_ENGSEL_SHA1HMAC (0x1 << 0)
+#define ACE_HASH_ENGSEL_SHA1HMACIN (0x1 << 0)
+#define ACE_HASH_ENGSEL_SHA1HMACOUT (0x9 << 0)
+#define ACE_HASH_ENGSEL_MD5HASH (0x2 << 0)
+#define ACE_HASH_ENGSEL_MD5HMAC (0x3 << 0)
+#define ACE_HASH_ENGSEL_MD5HMACIN (0x3 << 0)
+#define ACE_HASH_ENGSEL_MD5HMACOUT (0xb << 0)
+#define ACE_HASH_ENGSEL_SHA256HASH (0x4 << 0)
+#define ACE_HASH_ENGSEL_SHA256HMAC (0x5 << 0)
+#define ACE_HASH_ENGSEL_PRNG (0x8 << 0)
+#define ACE_HASH_STARTBIT_ON (1 << 4)
+#define ACE_HASH_USERIV_EN (1 << 5)
+#define ACE_HASH_PAUSE_ON (1 << 0)
+
+/* Hash control - FIFO mode */
+#define ACE_HASH_FIFO_MASK (1 << 0)
+#define ACE_HASH_FIFO_OFF (0 << 0)
+#define ACE_HASH_FIFO_ON (1 << 0)
+
+/* Hash control - byte swap */
+#define ACE_HASH_SWAP_MASK (0xf << 0)
+#define ACE_HASH_SWAPKEY_OFF (0 << 0)
+#define ACE_HASH_SWAPKEY_ON (1 << 0)
+#define ACE_HASH_SWAPIV_OFF (0 << 1)
+#define ACE_HASH_SWAPIV_ON (1 << 1)
+#define ACE_HASH_SWAPDO_OFF (0 << 2)
+#define ACE_HASH_SWAPDO_ON (1 << 2)
+#define ACE_HASH_SWAPDI_OFF (0 << 3)
+#define ACE_HASH_SWAPDI_ON (1 << 3)
+
+/* Hash status */
+#define ACE_HASH_BUFRDY_MASK (1 << 0)
+#define ACE_HASH_BUFRDY_OFF (0 << 0)
+#define ACE_HASH_BUFRDY_ON (1 << 0)
+#define ACE_HASH_SEEDSETTING_MASK (1 << 1)
+#define ACE_HASH_SEEDSETTING_OFF (0 << 1)
+#define ACE_HASH_SEEDSETTING_ON (1 << 1)
+#define ACE_HASH_PRNGBUSY_MASK (1 << 2)
+#define ACE_HASH_PRNGBUSY_OFF (0 << 2)
+#define ACE_HASH_PRNGBUSY_ON (1 << 2)
+#define ACE_HASH_PARTIALDONE_MASK (1 << 4)
+#define ACE_HASH_PARTIALDONE_OFF (0 << 4)
+#define ACE_HASH_PARTIALDONE_ON (1 << 4)
+#define ACE_HASH_PRNGDONE_MASK (1 << 5)
+#define ACE_HASH_PRNGDONE_OFF (0 << 5)
+#define ACE_HASH_PRNGDONE_ON (1 << 5)
+#define ACE_HASH_MSGDONE_MASK (1 << 6)
+#define ACE_HASH_MSGDONE_OFF (0 << 6)
+#define ACE_HASH_MSGDONE_ON (1 << 6)
+#define ACE_HASH_PRNGERROR_MASK (1 << 7)
+#define ACE_HASH_PRNGERROR_OFF (0 << 7)
+#define ACE_HASH_PRNGERROR_ON (1 << 7)
+#define ACE_HASH_PRNG_REG_NUM 5
+
+#define ACE_SHA_TYPE_SHA1 1
+#define ACE_SHA_TYPE_SHA256 2
+
+/**
+ * Computes the hash value of the input buffer using the ACE
+ *
+ * @param in_addr A pointer to the input buffer
+ * @param buflen Byte length of the input buffer
+ * @param out_addr A pointer to the output buffer. When complete,
+ * 32 bytes are copied to out_addr[0]...out_addr[31], so the caller
+ * should allocate at least 32 bytes at out_addr in advance.
+ * @param hash_type ACE_SHA_TYPE_SHA1 or ACE_SHA_TYPE_SHA256
+ *
+ * @return 0 on success, a negative error code (-EBUSY) on hardware fault
+ */
+int ace_sha_hash_digest(const uchar *in_addr, uint buflen,
+ uchar *out_addr, uint hash_type);
+#endif
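The contract documented above (at least 32 bytes of output space, a negative return value on a hardware fault) is easy to miss from the hw_sha1()/hw_sha256() wrappers, which only log failures via debug(); the sketch below shows a caller that checks the return value. The function name is hypothetical and not part of the patch.

/* Sketch only: call ace_sha_hash_digest() directly and surface errors. */
static int example_ace_sha1(const uchar *in, uint len, uchar *out)
{
	int ret;

	/* out must point to at least 32 bytes, as documented above */
	ret = ace_sha_hash_digest(in, len, out, ACE_SHA_TYPE_SHA1);
	if (ret)
		printf("ACE hash failed: %d\n", ret);

	return ret;
}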
diff --git a/roms/u-boot/drivers/crypto/fsl/Kconfig b/roms/u-boot/drivers/crypto/fsl/Kconfig
new file mode 100644
index 000000000..1f5dfb94b
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/Kconfig
@@ -0,0 +1,67 @@
+config FSL_CAAM
+ bool "Freescale Crypto Driver Support"
+ select SHA_HW_ACCEL
+ imply CMD_HASH
+ help
+ Enables Freescale's Cryptographic Accelerator and Assurance
+ Module (CAAM), also known as SEC version 4 (SEC4). The driver uses
+ the Job Ring as the interface to communicate with the CAAM.
+
+config CAAM_64BIT
+ bool
+ default y if PHYS_64BIT && !ARCH_IMX8M
+ help
+ Select the crypto driver for the 64-bit CAAM version
+
+config SYS_FSL_HAS_SEC
+ bool
+ help
+ Enable Freescale Secure Boot and Trusted Architecture
+
+config SYS_FSL_SEC_COMPAT_2
+ bool
+ help
+ Secure boot and trust architecture compatible version 2
+
+config SYS_FSL_SEC_COMPAT_4
+ bool
+ help
+ Secure boot and trust architecture compatible version 4
+
+config SYS_FSL_SEC_COMPAT_5
+ bool
+ help
+ Secure boot and trust architecture compatible version 5
+
+config SYS_FSL_SEC_COMPAT_6
+ bool
+ help
+ Secure boot and trust architecture compatible version 6
+
+config SYS_FSL_SEC_BE
+ bool "Big-endian access to Freescale Secure Boot"
+
+config SYS_FSL_SEC_COMPAT
+ int "Freescale Secure Boot compatibility"
+ depends on SYS_FSL_HAS_SEC
+ default 2 if SYS_FSL_SEC_COMPAT_2
+ default 4 if SYS_FSL_SEC_COMPAT_4
+ default 5 if SYS_FSL_SEC_COMPAT_5
+ default 6 if SYS_FSL_SEC_COMPAT_6
+
+config SYS_FSL_SEC_LE
+ bool "Little-endian access to Freescale Secure Boot"
+
+if FSL_CAAM
+
+config FSL_CAAM_RNG
+ bool "Enable Random Number Generator support"
+ depends on DM_RNG
+ default y
+ help
+ Enable support for the hardware-based random number generator
+ module of the CAAM. The random data is fetched from the DRBG
+ using the prediction resistance flag, which means the DRBG is
+ reseeded from the TRNG every time random data is generated.
+
+endif
diff --git a/roms/u-boot/drivers/crypto/fsl/Makefile b/roms/u-boot/drivers/crypto/fsl/Makefile
new file mode 100644
index 000000000..f9c3ccecf
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright 2014 Freescale Semiconductor, Inc.
+
+obj-y += sec.o
+obj-$(CONFIG_FSL_CAAM) += jr.o fsl_hash.o jobdesc.o error.o
+obj-$(CONFIG_CMD_BLOB)$(CONFIG_IMX_CAAM_DEK_ENCAP) += fsl_blob.o
+obj-$(CONFIG_RSA_FREESCALE_EXP) += fsl_rsa.o
+obj-$(CONFIG_FSL_CAAM_RNG) += rng.o
+obj-$(CONFIG_FSL_MFGPROT) += fsl_mfgprot.o
diff --git a/roms/u-boot/drivers/crypto/fsl/desc.h b/roms/u-boot/drivers/crypto/fsl/desc.h
new file mode 100644
index 000000000..5705c4f94
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/desc.h
@@ -0,0 +1,750 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * CAAM descriptor composition header
+ * Definitions to support CAAM descriptor instruction generation
+ *
+ * Copyright 2008-2014 Freescale Semiconductor, Inc.
+ *
+ * Based on desc.h file in linux drivers/crypto/caam
+ */
+
+#ifndef DESC_H
+#define DESC_H
+
+#include "type.h"
+
+#define KEY_BLOB_SIZE 32
+#define MAC_SIZE 16
+
+/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE 64
+
+/* Size of DEK Blob descriptor, inclusive of header */
+#define DEK_BLOB_DESCSIZE 9
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE 16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT 27
+#define CMD_MASK 0xf8000000
+
+#define CMD_KEY (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
+#define CMD_LOAD (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_STORE (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
+#define CMD_MOVE (0x0f << CMD_SHIFT)
+#define CMD_OPERATION (0x10 << CMD_SHIFT)
+#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
+#define CMD_JUMP (0x14 << CMD_SHIFT)
+#define CMD_MATH (0x15 << CMD_SHIFT)
+#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
+#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
+#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
+#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
+
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT 25
+#define CLASS_MASK (0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE (0x00 << CLASS_SHIFT)
+#define CLASS_1 (0x01 << CLASS_SHIFT)
+#define CLASS_2 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH (0x03 << CLASS_SHIFT)
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Do Not Run - marks a descriptor inexecutable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR 0x01000000
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE 0x00800000
+#define HDR_ZRO 0x00008000
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_MASK 0x3f
+#define HDR_START_IDX_SHIFT 16
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK 0x3f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK 0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED 0x00004000
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED 0x00002000
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX 0x00001000
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED 0x00001000
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE 0x00000800
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR 0x00000800
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_MASK 0x03
+#define HDR_SD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK 0x07
+#define HDR_JD_SHARE_SHIFT 8
+
+#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK 0x7f
+#define HDR_SD_LENGTH_MASK 0x3f
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
+#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
+#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF 0x01000000
+#define KEY_VLF 0x01000000
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM 0x00800000
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if TK is set
+ */
+#define KEY_ENC 0x00400000
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB 0x00200000
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT 0x00100000
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK 0x00008000
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
+ */
+#define KEY_DEST_SHIFT 16
+#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK 0x000003ff
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT 25
+#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF 0x01000000
+#define LDST_VLF LDST_SGF
+
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK 1
+#define LDST_IMM_SHIFT 23
+#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
+
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT 16
+#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT 8
+#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT 0
+#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
+#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
+#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT 6
+#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes */
+#define LDST_LEN_SHIFT 0
+#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
+#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
+#define LDLEN_RST_OFIFO (1 << 5)
+#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
+#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+
+/*
+ * AAD Definitions
+ */
+#define AES_KEY_SHIFT 8
+#define LD_CCM_MODE 0x66
+#define KEY_AES_SRC (0x55 << AES_KEY_SHIFT)
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
+ */
+#define FIFOLD_CLASS_SHIFT 25
+#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT 25
+#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, refers to a SG table. Within
+ * SEQ_FIFO_LOAD, is variable input sequence
+ */
+#define FIFOLDST_SGF_SHIFT 24
+#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
+
+/* Immediate - Data follows command in descriptor */
+#define FIFOLD_IMM_SHIFT 23
+#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT 23
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT 22
+#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT)
+
+/* Input data type.*/
+#define FIFOLD_TYPE_SHIFT 16
+#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO (0x0F << FIFOLD_TYPE_SHIFT)
+
+#define FIFOLDST_LEN_MASK 0xffff
+#define FIFOLDST_EXT_LEN_MASK 0xffffffff
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT 16
+#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT 24
+#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT 16
+#define OP_PCLID_MASK (0xff << 16)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_SECMEM 0x08
+#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSA_SIGN (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSA_VERIFY (0x16 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL */
+#define OP_PCLID_MP_PUB_KEY (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_MP_SIGN (0x15 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_ENCAP_PROTOCOL */
+#define OP_PCLID_MP_PRIV_KEY (0x14 << OP_PCLID_SHIFT)
+
+/* PROTINFO fields for discrete log public key protocols */
+#define OP_PROTINFO_F2M_FP 0x00000001
+#define OP_PROTINFO_ECC_DL 0x00000002
+#define OP_PROTINFO_ENC_PRI 0x00000004
+#define OP_PROTINFO_TEST 0x00000008
+#define OP_PROTINFO_EXT_PRI 0x00000010
+#define OP_PROTINFO_ENC_Z 0x00000020
+#define OP_PROTINFO_EKT_Z 0x00000040
+#define OP_PROTINFO_MES_REP 0x00000400
+#define OP_PROTINFO_HASH_MD5 0x00000000
+#define OP_PROTINFO_HASH_SHA1 0x00000080
+#define OP_PROTINFO_HASH_SHA224 0x00000100
+#define OP_PROTINFO_HASH_SHA256 0x00000180
+#define OP_PROTINFO_HASH_SHA384 0x00000200
+#define OP_PROTINFO_HASH_SHA512 0x00000280
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT 24
+#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1 2
+#define OP_ALG_TYPE_CLASS2 4
+
+#define OP_ALG_ALGSEL_SHIFT 16
+#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT 4
+#define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT 2
+#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT 1
+#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF (0 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_ON (1 << OP_ALG_ICV_SHIFT)
+
+#define OP_ALG_PR_ON 0x02
+
+#define OP_ALG_DIR_SHIFT 0
+#define OP_ALG_DIR_MASK 1
+#define OP_ALG_DECRYPT 0
+#define OP_ALG_ENCRYPT 1
+
+/* PKHA algorithm type set */
+#define OP_ALG_PK 0x00800000
+#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_EXPO 0x006
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS 0x04000000
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL 0x02000000
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF 0x01000000
+
+/* Appends to a previous pointer */
+#define SQIN_PRE 0x00800000
+
+/* Use extended length following pointer */
+#define SQIN_EXT 0x00400000
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO 0x00200000
+
+/* Replace job descriptor */
+#define SQIN_RJD 0x00100000
+
+#define SQIN_LEN_SHIFT 0
+#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
+
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF 0x01000000
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE SQIN_PRE
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO SQIN_RTO
+
+/* Use extended length following pointer */
+#define SQOUT_EXT 0x00400000
+
+#define SQOUT_LEN_SHIFT 0
+#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT 25
+#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT 24
+#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
+
+#define MOVE_SRC_SHIFT 20
+#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT 16
+#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO_NOINFO (0x0a << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT 8
+#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT 0
+#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT 0
+#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
+
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT 25
+#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE 0
+#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT 24
+#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
+
+#define JUMP_TYPE_SHIFT 22
+#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT 16
+#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. JSL bit is factored in */
+#define JUMP_COND_SHIFT 8
+#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
+
+#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
+
+#define JUMP_OFFSET_SHIFT 0
+#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
+
+#define OP_ALG_RNG4_SHIFT 4
+#define OP_ALG_RNG4_MAS (0x1f3 << OP_ALG_RNG4_SHIFT)
+#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT)
+
+
+/* Structures for Protocol Data Blocks */
+struct __packed pdb_ecdsa_verify {
+ uint32_t pdb_hdr;
+ caam_dma_addr_t dma_q; /* Pointer to q (elliptic curve) */
+ caam_dma_addr_t dma_r; /* Pointer to r (elliptic curve) */
+ caam_dma_addr_t dma_g_xy; /* Pointer to Gx,y (elliptic curve) */
+ caam_dma_addr_t dma_pkey; /* Pointer to Wx,y (public key) */
+ caam_dma_addr_t dma_hash; /* Pointer to hash input */
+ caam_dma_addr_t dma_c; /* Pointer to C_signature */
+ caam_dma_addr_t dma_d; /* Pointer to D_signature */
+ caam_dma_addr_t dma_buf; /* Pointer to 64-byte temp buffer */
+ caam_dma_addr_t dma_ab; /* Pointer to a,b (elliptic curve ) */
+ uint32_t img_size; /* Length of Message */
+};
+
+struct __packed pdb_ecdsa_sign {
+ uint32_t pdb_hdr;
+ caam_dma_addr_t dma_q; /* Pointer to q (elliptic curve) */
+ caam_dma_addr_t dma_r; /* Pointer to r (elliptic curve) */
+ caam_dma_addr_t dma_g_xy; /* Pointer to Gx,y (elliptic curve) */
+ caam_dma_addr_t dma_pri_key; /* Pointer to S (Private key) */
+ caam_dma_addr_t dma_hash; /* Pointer to hash input */
+ caam_dma_addr_t dma_c; /* Pointer to C_signature */
+ caam_dma_addr_t dma_d; /* Pointer to D_signature */
+ caam_dma_addr_t dma_ab; /* Pointer to a,b (elliptic curve ) */
+ caam_dma_addr_t dma_u; /* Pointer to Per Message Random */
+ uint32_t img_size; /* Length of Message */
+};
+
+#define PDB_ECDSA_SGF_SHIFT 23
+#define PDB_ECDSA_L_SHIFT 7
+#define PDB_ECDSA_N_SHIFT 0
+
+struct __packed pdb_mp_pub_k {
+ uint32_t pdb_hdr;
+ #define PDB_MP_PUB_K_SGF_SHIFT 31
+ caam_dma_addr_t dma_pkey; /* Pointer to Wx,y (public key) */
+};
+
+struct __packed pdb_mp_sign {
+ uint32_t pdb_hdr;
+ #define PDB_MP_SIGN_SGF_SHIFT 28
+ caam_dma_addr_t dma_addr_msg; /* Pointer to Message */
+ caam_dma_addr_t dma_addr_hash; /* Pointer to hash output */
+ caam_dma_addr_t dma_addr_c_sig; /* Pointer to C_signature */
+ caam_dma_addr_t dma_addr_d_sig; /* Pointer to D_signature */
+ uint32_t img_size; /* Length of Message */
+};
+
+#define PDB_MP_CSEL_SHIFT 17
+#define PDB_MP_CSEL_WIDTH 4
+#define PDB_MP_CSEL_P256 0x3 << PDB_MP_CSEL_SHIFT /* P-256 */
+#define PDB_MP_CSEL_P384 0x4 << PDB_MP_CSEL_SHIFT /* P-384 */
+#define PDB_MP_CSEL_P521 0x5 << PDB_MP_CSEL_SHIFT /* P-521 */
+
+#endif /* DESC_H */
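To make the bit layout concrete, the sketch below packs the OPERATION command word for a one-shot SHA-256 hash from the constants above. The flag combination follows the hash job descriptor built by jobdesc.c elsewhere in this patch, but the helper itself and its comments are illustrative assumptions, not part of the patch.

/* Sketch only: compose an OPERATION command word for a one-shot SHA-256 hash. */
static inline u32 example_sha256_op_word(void)
{
	return CMD_OPERATION |		/* command type, bits 31:27 */
	       OP_TYPE_CLASS2_ALG |	/* run on the class 2 (hash) CHA */
	       OP_ALG_ALGSEL_SHA256 |	/* algorithm select field */
	       OP_ALG_AAI_HASH |	/* plain hash, not HMAC */
	       OP_ALG_AS_INITFINAL |	/* INIT and FINALIZE in one operation */
	       OP_ALG_ENCRYPT;		/* direction bit, as used for hashing */
}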
diff --git a/roms/u-boot/drivers/crypto/fsl/desc_constr.h b/roms/u-boot/drivers/crypto/fsl/desc_constr.h
new file mode 100644
index 000000000..209557c4f
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/desc_constr.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * caam descriptor construction helper functions
+ *
+ * Copyright 2008-2014 Freescale Semiconductor, Inc.
+ *
+ * Based on desc_constr.h file in linux drivers/crypto/caam
+ */
+
+#include <linux/compat.h>
+#include "desc.h"
+
+#define IMMEDIATE (1 << 23)
+#define CAAM_CMD_SZ sizeof(u32)
+#define CAAM_PTR_SZ sizeof(caam_dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+#ifdef DEBUG
+#define PRINT_POS do { printf("%02d: %s\n", desc_len(desc),\
+ &__func__[sizeof("append")]); \
+ } while (0)
+#else
+#define PRINT_POS
+#endif
+
+#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_CHG_SHARE_OK_NO_PROP << \
+ LDST_OFFSET_SHIFT))
+#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+
+#ifdef CONFIG_CAAM_64BIT
+struct ptr_addr_t {
+#ifdef CONFIG_SYS_FSL_SEC_LE
+ u32 low;
+ u32 high;
+#elif defined(CONFIG_SYS_FSL_SEC_BE)
+ u32 high;
+ u32 low;
+#else
+#error Neither CONFIG_SYS_FSL_SEC_LE nor CONFIG_SYS_FSL_SEC_BE is defined
+#endif
+};
+#endif
+
+static inline void pdb_add_ptr(caam_dma_addr_t *offset, caam_dma_addr_t ptr)
+{
+#ifdef CONFIG_CAAM_64BIT
+ /* The Position of low and high part of 64 bit address
+ * will depend on the endianness of CAAM Block */
+ struct ptr_addr_t *ptr_addr = (struct ptr_addr_t *)offset;
+
+ ptr_addr->high = (u32)(ptr >> 32);
+ ptr_addr->low = (u32)ptr;
+#else
+ *offset = ptr;
+#endif
+}
+
+static inline int desc_len(u32 *desc)
+{
+ return *desc & HDR_DESCLEN_MASK;
+}
+
+static inline int desc_bytes(void *desc)
+{
+ return desc_len(desc) * CAAM_CMD_SZ;
+}
+
+static inline u32 *desc_end(u32 *desc)
+{
+ return desc + desc_len(desc);
+}
+
+static inline void *desc_pdb(u32 *desc)
+{
+ return desc + 1;
+}
+
+static inline void init_desc(u32 *desc, u32 options)
+{
+ *desc = (options | HDR_ONE) + 1;
+}
+
+static inline void init_job_desc(u32 *desc, u32 options)
+{
+ init_desc(desc, CMD_DESC_HDR | options);
+}
+
+static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+{
+ u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+
+ init_job_desc(desc,
+ (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) |
+ options);
+}
+
+static inline void append_ptr(u32 *desc, caam_dma_addr_t ptr)
+{
+ caam_dma_addr_t *offset = (caam_dma_addr_t *)desc_end(desc);
+
+#ifdef CONFIG_CAAM_64BIT
+ /* The Position of low and high part of 64 bit address
+ * will depend on the endianness of CAAM Block */
+ struct ptr_addr_t *ptr_addr = (struct ptr_addr_t *)offset;
+
+ ptr_addr->high = (u32)(ptr >> 32);
+ ptr_addr->low = (u32)ptr;
+#else
+ *offset = ptr;
+#endif
+
+ (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
+}
+
+static inline void append_data(u32 *desc, void *data, int len)
+{
+ u32 *offset = desc_end(desc);
+
+ if (len) /* avoid sparse warning: memcpy with byte count of 0 */
+ memcpy(offset, data, len);
+
+ (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+}
+
+static inline void append_cmd(u32 *desc, u32 command)
+{
+ u32 *cmd = desc_end(desc);
+
+ *cmd = command;
+
+ (*desc)++;
+}
+
+#define append_u32 append_cmd
+
+static inline void append_u64(u32 *desc, u64 data)
+{
+ u32 *offset = desc_end(desc);
+
+ *offset = upper_32_bits(data);
+ *(++offset) = lower_32_bits(data);
+
+ (*desc) += 2;
+}
+
+/* Write command without affecting header, and return pointer to next word */
+static inline u32 *write_cmd(u32 *desc, u32 command)
+{
+ *desc = command;
+
+ return desc + 1;
+}
+
+static inline void append_cmd_ptr(u32 *desc, caam_dma_addr_t ptr, int len,
+ u32 command)
+{
+ append_cmd(desc, command | len);
+ append_ptr(desc, ptr);
+}
+
+/* Write length after pointer, rather than inside command */
+static inline void append_cmd_ptr_extlen(u32 *desc, caam_dma_addr_t ptr,
+ unsigned int len, u32 command)
+{
+ append_cmd(desc, command);
+ if (!(command & (SQIN_RTO | SQIN_PRE)))
+ append_ptr(desc, ptr);
+ append_cmd(desc, len);
+}
+
+static inline void append_cmd_data(u32 *desc, void *data, int len,
+ u32 command)
+{
+ append_cmd(desc, command | IMMEDIATE | len);
+ append_data(desc, data, len);
+}
+
+#define APPEND_CMD_RET(cmd, op) \
+static inline u32 *append_##cmd(u32 *desc, u32 options) \
+{ \
+ u32 *cmd = desc_end(desc); \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | options); \
+ return cmd; \
+}
+APPEND_CMD_RET(jump, JUMP)
+APPEND_CMD_RET(move, MOVE)
+
+static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+{
+ *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
+}
+
+static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+{
+ *move_cmd &= ~MOVE_OFFSET_MASK;
+ *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) &
+ MOVE_OFFSET_MASK);
+}
+
+#define APPEND_CMD(cmd, op) \
+static inline void append_##cmd(u32 *desc, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | options); \
+}
+APPEND_CMD(operation, OPERATION)
+
+#define APPEND_CMD_LEN(cmd, op) \
+static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | len | options); \
+}
+APPEND_CMD_LEN(seq_store, SEQ_STORE)
+APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
+APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
+
+#define APPEND_CMD_PTR(cmd, op) \
+static inline void append_##cmd(u32 *desc, caam_dma_addr_t ptr, unsigned int len, \
+ u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR(key, KEY)
+APPEND_CMD_PTR(load, LOAD)
+APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
+APPEND_CMD_PTR(fifo_store, FIFO_STORE)
+
+static inline void append_store(u32 *desc, caam_dma_addr_t ptr, unsigned int len,
+ u32 options)
+{
+ u32 cmd_src;
+
+ cmd_src = options & LDST_SRCDST_MASK;
+
+ append_cmd(desc, CMD_STORE | options | len);
+
+ /* The following options do not require pointer */
+ if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED ||
+ cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB ||
+ cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE ||
+ cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE))
+ append_ptr(desc, ptr);
+}
+
+#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
+static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, caam_dma_addr_t ptr, \
+ unsigned int len, \
+ u32 options) \
+{ \
+ PRINT_POS; \
+ if (options & (SQIN_RTO | SQIN_PRE)) \
+ append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \
+ else \
+ append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
+}
+APPEND_SEQ_PTR_INTLEN(in, IN)
+APPEND_SEQ_PTR_INTLEN(out, OUT)
+
+#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+ unsigned int len, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd_data(desc, data, len, CMD_##op | options); \
+}
+APPEND_CMD_PTR_TO_IMM(load, LOAD);
+APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
+
+#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
+static inline void append_##cmd##_extlen(u32 *desc, caam_dma_addr_t ptr, \
+ unsigned int len, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \
+}
+APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR)
+APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
+
+/*
+ * Determine whether to store length internally or externally depending on
+ * the size of its type
+ */
+#define APPEND_CMD_PTR_LEN(cmd, op, type) \
+static inline void append_##cmd(u32 *desc, caam_dma_addr_t ptr, \
+ type len, u32 options) \
+{ \
+ PRINT_POS; \
+ if (sizeof(type) > sizeof(u16)) \
+ append_##cmd##_extlen(desc, ptr, len, options); \
+ else \
+ append_##cmd##_intlen(desc, ptr, len, options); \
+}
+APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32)
+APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
+
+/*
+ * 2nd variant for commands whose specified immediate length differs
+ * from length of immediate data provided, e.g., split keys
+ */
+#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
+static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+ unsigned int data_len, \
+ unsigned int len, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
+ append_data(desc, data, data_len); \
+}
+APPEND_CMD_PTR_TO_IMM2(key, KEY);
+
+#define APPEND_CMD_RAW_IMM(cmd, op, type) \
+static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+ u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
+ append_cmd(desc, immediate); \
+}
+APPEND_CMD_RAW_IMM(load, LOAD, u32);
diff --git a/roms/u-boot/drivers/crypto/fsl/error.c b/roms/u-boot/drivers/crypto/fsl/error.c
new file mode 100644
index 000000000..c76574919
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/error.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CAAM Error Reporting
+ *
+ * Copyright 2009-2014 Freescale Semiconductor, Inc.
+ *
+ * Derived from error.c file in linux drivers/crypto/caam
+ */
+
+#include <common.h>
+#include <log.h>
+#include <malloc.h>
+#include "desc.h"
+#include "jr.h"
+
+#define CAAM_ERROR_STR_MAX 302
+
+#define JRSTA_SSRC_SHIFT 28
+#define JRSTA_CCBERR_CHAID_MASK 0x00f0
+#define JRSTA_CCBERR_CHAID_SHIFT 4
+#define JRSTA_CCBERR_ERRID_MASK 0x000f
+#define JRSTA_CCBERR_CHAID_RNG (0x05 << JRSTA_CCBERR_CHAID_SHIFT)
+
+#define JRSTA_DECOERR_JUMP 0x08000000
+#define JRSTA_DECOERR_INDEX_SHIFT 8
+#define JRSTA_DECOERR_INDEX_MASK 0xff00
+#define JRSTA_DECOERR_ERROR_MASK 0x00ff
+
+
+static const struct {
+ u8 value;
+ const char *error_text;
+} desc_error_list[] = {
+ { 0x00, "No error." },
+ { 0x01, "SGT Length Error. The descriptor is trying to read" \
+ " more data than is contained in the SGT table." },
+ { 0x02, "SGT Null Entry Error." },
+ { 0x03, "Job Ring Control Error. Bad value in Job Ring Control reg." },
+ { 0x04, "Invalid Descriptor Command." },
+ { 0x05, "Reserved." },
+ { 0x06, "Invalid KEY Command" },
+ { 0x07, "Invalid LOAD Command" },
+ { 0x08, "Invalid STORE Command" },
+ { 0x09, "Invalid OPERATION Command" },
+ { 0x0A, "Invalid FIFO LOAD Command" },
+ { 0x0B, "Invalid FIFO STORE Command" },
+ { 0x0C, "Invalid MOVE/MOVE_LEN Command" },
+ { 0x0D, "Invalid JUMP Command" },
+ { 0x0E, "Invalid MATH Command" },
+ { 0x0F, "Invalid SIGNATURE Command" },
+ { 0x10, "Invalid Sequence Command" },
+ { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
+ { 0x12, "Shared Descriptor Header Error" },
+ { 0x13, "Header Error. Invalid length or parity, or other problems." },
+ { 0x14, "Burster Error. Burster has gotten to an illegal state" },
+ { 0x15, "Context Register Length Error" },
+ { 0x16, "DMA Error" },
+ { 0x17, "Reserved." },
+ { 0x1A, "Job failed due to JR reset" },
+ { 0x1B, "Job failed due to Fail Mode" },
+ { 0x1C, "DECO Watchdog timer timeout error" },
+ { 0x1D, "DECO tried to copy a key from another DECO but" \
+ " the other DECO's Key Registers were locked" },
+ { 0x1E, "DECO attempted to copy data from a DECO" \
+ " that had an unmasked Descriptor error" },
+ { 0x1F, "LIODN error" },
+ { 0x20, "DECO has completed a reset initiated via the DRR register" },
+ { 0x21, "Nonce error" },
+ { 0x22, "Meta data is too large (> 511 bytes) for TLS decap" },
+ { 0x23, "Read Input Frame error" },
+ { 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
+ { 0x80, "DNR (do not run) error" },
+ { 0x81, "undefined protocol command" },
+ { 0x82, "invalid setting in PDB" },
+ { 0x83, "Anti-replay LATE error" },
+ { 0x84, "Anti-replay REPLAY error" },
+ { 0x85, "Sequence number overflow" },
+ { 0x86, "Sigver invalid signature" },
+ { 0x87, "DSA Sign Illegal test descriptor" },
+ { 0x88, "Protocol Format Error" },
+ { 0x89, "Protocol Size Error" },
+ { 0xC1, "Blob Command error: Undefined mode" },
+ { 0xC2, "Blob Command error: Secure Memory Blob mode error" },
+ { 0xC4, "Blob Command error: Black Blob key or input size error" },
+ { 0xC5, "Blob Command error: Invalid key destination" },
+ { 0xC8, "Blob Command error: Trusted/Secure mode error" },
+ { 0xF0, "IPsec TTL or hop limit field is 0, or was decremented to 0" },
+ { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
+};
+
+static const char * const cha_id_list[] = {
+ "",
+ "AES",
+ "DES",
+ "ARC4",
+ "MDHA",
+ "RNG",
+ "SNOW f8",
+ "Kasumi f8/9",
+ "PKHA",
+ "CRCA",
+ "SNOW f9",
+ "ZUCE",
+ "ZUCA",
+};
+
+static const char * const err_id_list[] = {
+ "No error.",
+ "Mode error.",
+ "Data size error.",
+ "Key size error.",
+ "PKHA A memory size error.",
+ "PKHA B memory size error.",
+ "Data arrived out of sequence error.",
+ "PKHA divide-by-zero error.",
+ "PKHA modulus even error.",
+ "DES key parity error.",
+ "ICV check failed.",
+ "Hardware error.",
+ "Unsupported CCM AAD size.",
+ "Class 1 CHA is not reset",
+ "Invalid CHA combination was selected",
+ "Invalid CHA selected.",
+};
+
+static const char * const rng_err_id_list[] = {
+ "",
+ "",
+ "",
+ "Instantiate",
+ "Not instantiated",
+ "Test instantiate",
+ "Prediction resistance",
+ "Prediction resistance and test request",
+ "Uninstantiate",
+ "Secure key generation",
+};
+
+static void report_ccb_status(const u32 status,
+ const char *error)
+{
+ u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
+ JRSTA_CCBERR_CHAID_SHIFT;
+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+ u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+ JRSTA_DECOERR_INDEX_SHIFT;
+ char *idx_str;
+ const char *cha_str = "unidentified cha_id value 0x";
+ char cha_err_code[3] = { 0 };
+ const char *err_str = "unidentified err_id value 0x";
+ char err_err_code[3] = { 0 };
+
+ if (status & JRSTA_DECOERR_JUMP)
+ idx_str = "jump tgt desc idx";
+ else
+ idx_str = "desc idx";
+
+ if (cha_id < ARRAY_SIZE(cha_id_list))
+ cha_str = cha_id_list[cha_id];
+ else
+ snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id);
+
+ if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
+ err_id < ARRAY_SIZE(rng_err_id_list) &&
+ strlen(rng_err_id_list[err_id])) {
+ /* RNG-only error */
+ err_str = rng_err_id_list[err_id];
+ } else if (err_id < ARRAY_SIZE(err_id_list)) {
+ err_str = err_id_list[err_id];
+ } else {
+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+ }
+
+ debug("%08x: %s: %s %d: %s%s: %s%s\n",
+ status, error, idx_str, idx,
+ cha_str, cha_err_code,
+ err_str, err_err_code);
+}
+
+static void report_jump_status(const u32 status,
+ const char *error)
+{
+ debug("%08x: %s: %s() not implemented\n",
+ status, error, __func__);
+}
+
+static void report_deco_status(const u32 status,
+ const char *error)
+{
+ u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
+ u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+ JRSTA_DECOERR_INDEX_SHIFT;
+ char *idx_str;
+ const char *err_str = "unidentified error value 0x";
+ char err_err_code[3] = { 0 };
+ int i;
+
+ if (status & JRSTA_DECOERR_JUMP)
+ idx_str = "jump tgt desc idx";
+ else
+ idx_str = "desc idx";
+
+ for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
+ if (desc_error_list[i].value == err_id)
+ break;
+
+ if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text)
+ err_str = desc_error_list[i].error_text;
+ else
+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+ debug("%08x: %s: %s %d: %s%s\n",
+ status, error, idx_str, idx, err_str, err_err_code);
+}
+
+static void report_jr_status(const u32 status,
+ const char *error)
+{
+ debug("%08x: %s: %s() not implemented\n",
+ status, error, __func__);
+}
+
+static void report_cond_code_status(const u32 status,
+ const char *error)
+{
+ debug("%08x: %s: %s() not implemented\n",
+ status, error, __func__);
+}
+
+void caam_jr_strstatus(u32 status)
+{
+ static const struct stat_src {
+ void (*report_ssed)(const u32 status,
+ const char *error);
+ const char *error;
+ } status_src[] = {
+ { NULL, "No error" },
+ { NULL, NULL },
+ { report_ccb_status, "CCB" },
+ { report_jump_status, "Jump" },
+ { report_deco_status, "DECO" },
+ { NULL, NULL },
+ { report_jr_status, "Job Ring" },
+ { report_cond_code_status, "Condition Code" },
+ };
+ u32 ssrc = status >> JRSTA_SSRC_SHIFT;
+ const char *error = status_src[ssrc].error;
+
+ /*
+ * If there is no further error handling function, just
+ * print the error code, error string and exit. Otherwise
+ * call the handler function.
+ */
+ if (!status_src[ssrc].report_ssed)
+ debug("%08x: %s:\n", status, status_src[ssrc].error);
+ else
+ status_src[ssrc].report_ssed(status, error);
+}
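+
+/*
+ * Worked example (illustrative only), following the SSRC routing above:
+ * a status word of 0x20000042 has ssrc = 0x2, so it is passed to
+ * report_ccb_status(), which decodes CHA id 0x4 ("MDHA") and error id 0x2
+ * ("Data size error."); a word with ssrc = 0x4 would go to
+ * report_deco_status() instead.
+ *
+ *	caam_jr_strstatus(0x20000042);
+ */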
diff --git a/roms/u-boot/drivers/crypto/fsl/fsl_blob.c b/roms/u-boot/drivers/crypto/fsl/fsl_blob.c
new file mode 100644
index 000000000..e8202cc56
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/fsl_blob.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <log.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <fsl_sec.h>
+#include <asm/cache.h>
+#include <linux/errno.h>
+#include "jobdesc.h"
+#include "desc.h"
+#include "jr.h"
+
+/**
+ * blob_decap() - Decapsulate the data from a blob
+ * @key_mod: - Key modifier address
+ * @src: - Source address (blob)
+ * @dst: - Destination address (data)
+ * @len: - Size of decapsulated data
+ *
+ * Note: Start and end of the key_mod, src and dst buffers have to be aligned to
+ * the cache line size (ARCH_DMA_MINALIGN) for the CAAM operation to succeed.
+ *
+ * Returns zero on success, negative on error.
+ */
+int blob_decap(u8 *key_mod, u8 *src, u8 *dst, u32 len)
+{
+ int ret, size, i = 0;
+ u32 *desc;
+
+ if (!IS_ALIGNED((uintptr_t)key_mod, ARCH_DMA_MINALIGN) ||
+ !IS_ALIGNED((uintptr_t)src, ARCH_DMA_MINALIGN) ||
+ !IS_ALIGNED((uintptr_t)dst, ARCH_DMA_MINALIGN)) {
+ puts("Error: blob_decap: Address arguments are not aligned!\n");
+ return -EINVAL;
+ }
+
+ printf("\nDecapsulating blob to get data\n");
+ desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
+ if (!desc) {
+ debug("Not enough memory for descriptor allocation\n");
+ return -ENOMEM;
+ }
+
+ size = ALIGN(16, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)key_mod,
+ (unsigned long)key_mod + size);
+
+ size = ALIGN(BLOB_SIZE(len), ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)src,
+ (unsigned long)src + size);
+
+ inline_cnstr_jobdesc_blob_decap(desc, key_mod, src, dst, len);
+
+ debug("Descriptor dump:\n");
+ for (i = 0; i < 14; i++)
+ debug("Word[%d]: %08x\n", i, *(desc + i));
+
+ size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)desc,
+ (unsigned long)desc + size);
+
+ flush_dcache_range((unsigned long)dst,
+ (unsigned long)dst + size);
+
+ ret = run_descriptor_jr(desc);
+
+ if (ret) {
+ printf("Error in blob decapsulation: %d\n", ret);
+ } else {
+ size = ALIGN(len, ARCH_DMA_MINALIGN);
+ invalidate_dcache_range((unsigned long)dst,
+ (unsigned long)dst + size);
+
+ puts("Blob decapsulation successful.\n");
+ }
+
+ free(desc);
+ return ret;
+}
+
+/**
+ * blob_encap() - Encapsulate the data as a blob
+ * @key_mod: - Key modifier address
+ * @src: - Source address (data)
+ * @dst: - Destination address (blob)
+ * @len: - Size of data to be encapsulated
+ *
+ * Note: Start and end of the key_mod, src and dst buffers have to be aligned to
+ * the cache line size (ARCH_DMA_MINALIGN) for the CAAM operation to succeed.
+ *
+ * Returns zero on success, negative on error.
+ */
+int blob_encap(u8 *key_mod, u8 *src, u8 *dst, u32 len)
+{
+ int ret, size, i = 0;
+ u32 *desc;
+
+ if (!IS_ALIGNED((uintptr_t)key_mod, ARCH_DMA_MINALIGN) ||
+ !IS_ALIGNED((uintptr_t)src, ARCH_DMA_MINALIGN) ||
+ !IS_ALIGNED((uintptr_t)dst, ARCH_DMA_MINALIGN)) {
+ puts("Error: blob_encap: Address arguments are not aligned!\n");
+ return -EINVAL;
+ }
+
+ printf("\nEncapsulating data to form blob\n");
+ desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
+ if (!desc) {
+ debug("Not enough memory for descriptor allocation\n");
+ return -ENOMEM;
+ }
+
+ size = ALIGN(16, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)key_mod,
+ (unsigned long)key_mod + size);
+
+ size = ALIGN(len, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)src,
+ (unsigned long)src + size);
+
+ inline_cnstr_jobdesc_blob_encap(desc, key_mod, src, dst, len);
+
+ debug("Descriptor dump:\n");
+ for (i = 0; i < 14; i++)
+ debug("Word[%d]: %08x\n", i, *(desc + i));
+
+ size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)desc,
+ (unsigned long)desc + size);
+
+ flush_dcache_range((unsigned long)dst,
+ (unsigned long)dst + size);
+
+ ret = run_descriptor_jr(desc);
+
+ if (ret) {
+ printf("Error in blob encapsulation: %d\n", ret);
+ } else {
+ size = ALIGN(BLOB_SIZE(len), ARCH_DMA_MINALIGN);
+ invalidate_dcache_range((unsigned long)dst,
+ (unsigned long)dst + size);
+
+ puts("Blob encapsulation successful.\n");
+ }
+
+ free(desc);
+ return ret;
+}
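+
+/*
+ * Illustrative usage sketch (not part of the driver); the buffer names and
+ * the 64-byte payload size are hypothetical. All buffers must be cache-line
+ * aligned as noted above, and the blob buffer must hold BLOB_SIZE(len)
+ * bytes (the payload plus the 32-byte key blob and 16-byte MAC overhead):
+ *
+ *	u8 *km   = malloc_cache_aligned(ALIGN(16, ARCH_DMA_MINALIGN));
+ *	u8 *data = malloc_cache_aligned(ALIGN(64, ARCH_DMA_MINALIGN));
+ *	u8 *blob = malloc_cache_aligned(ALIGN(BLOB_SIZE(64), ARCH_DMA_MINALIGN));
+ *
+ *	// ... fill km with a 16-byte key modifier and data with the payload
+ *	if (!blob_encap(km, data, blob, 64))
+ *		blob_decap(km, blob, data, 64);
+ */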
+
+#ifdef CONFIG_CMD_DEKBLOB
+int blob_dek(const u8 *src, u8 *dst, u8 len)
+{
+ int ret, size, i = 0;
+ u32 *desc;
+
+ int out_sz = WRP_HDR_SIZE + len + KEY_BLOB_SIZE + MAC_SIZE;
+
+ puts("\nEncapsulating provided DEK to form blob\n");
+ desc = memalign(ARCH_DMA_MINALIGN,
+ sizeof(uint32_t) * DEK_BLOB_DESCSIZE);
+ if (!desc) {
+ debug("Not enough memory for descriptor allocation\n");
+ return -ENOMEM;
+ }
+
+ ret = inline_cnstr_jobdesc_blob_dek(desc, src, dst, len);
+ if (ret) {
+ debug("Error in Job Descriptor Construction: %d\n", ret);
+ } else {
+ size = roundup(sizeof(uint32_t) * DEK_BLOB_DESCSIZE,
+ ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)desc,
+ (unsigned long)desc + size);
+ size = roundup(sizeof(uint8_t) * out_sz, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)dst,
+ (unsigned long)dst + size);
+
+ ret = run_descriptor_jr(desc);
+ }
+
+ if (ret) {
+ debug("Error in Encapsulation %d\n", ret);
+ goto err;
+ }
+
+ size = roundup(out_sz, ARCH_DMA_MINALIGN);
+ invalidate_dcache_range((unsigned long)dst, (unsigned long)dst+size);
+
+ puts("DEK Blob\n");
+ for (i = 0; i < out_sz; i++)
+ printf("%02X", ((uint8_t *)dst)[i]);
+ printf("\n");
+
+err:
+ free(desc);
+ return ret;
+}
+#endif
diff --git a/roms/u-boot/drivers/crypto/fsl/fsl_hash.c b/roms/u-boot/drivers/crypto/fsl/fsl_hash.c
new file mode 100644
index 000000000..8b5c26db0
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/fsl_hash.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <log.h>
+#include <malloc.h>
+#include <memalign.h>
+#include "jobdesc.h"
+#include "desc.h"
+#include "jr.h"
+#include "fsl_hash.h"
+#include <hw_sha.h>
+#include <asm/cache.h>
+#include <linux/errno.h>
+
+#define CRYPTO_MAX_ALG_NAME 80
+#define SHA1_DIGEST_SIZE 20
+#define SHA256_DIGEST_SIZE 32
+
+struct caam_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ unsigned int digestsize;
+ u32 alg_type;
+};
+
+enum caam_hash_algos {
+ SHA1 = 0,
+ SHA256
+};
+
+static struct caam_hash_template driver_hash[] = {
+ {
+ .name = "sha1",
+ .digestsize = SHA1_DIGEST_SIZE,
+ .alg_type = OP_ALG_ALGSEL_SHA1,
+ },
+ {
+ .name = "sha256",
+ .digestsize = SHA256_DIGEST_SIZE,
+ .alg_type = OP_ALG_ALGSEL_SHA256,
+ },
+};
+
+static enum caam_hash_algos get_hash_type(struct hash_algo *algo)
+{
+ if (!strcmp(algo->name, driver_hash[SHA1].name))
+ return SHA1;
+ else
+ return SHA256;
+}
+
+/* Create the context for progressive hashing using h/w acceleration.
+ *
+ * @ctxp: Pointer to the pointer of the context for hashing
+ * @caam_algo: Enum for SHA1 or SHA256
+ * @return 0 if ok, -ENOMEM on error
+ */
+static int caam_hash_init(void **ctxp, enum caam_hash_algos caam_algo)
+{
+ *ctxp = calloc(1, sizeof(struct sha_ctx));
+ if (*ctxp == NULL) {
+ debug("Cannot allocate memory for context\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * Update sg table for progressive hashing using h/w acceleration
+ *
+ * The context is freed by this function if an error occurs.
+ * We support at most 32 Scatter/Gather Entries.
+ *
+ * @hash_ctx: Pointer to the context for hashing
+ * @buf: Pointer to the buffer being hashed
+ * @size: Size of the buffer being hashed
+ * @is_last: 1 if this is the last update; 0 otherwise
+ * @caam_algo: Enum for SHA1 or SHA256
+ * @return 0 if ok, -EINVAL on error
+ */
+static int caam_hash_update(void *hash_ctx, const void *buf,
+ unsigned int size, int is_last,
+ enum caam_hash_algos caam_algo)
+{
+ uint32_t final;
+ caam_dma_addr_t addr = virt_to_phys((void *)buf);
+ struct sha_ctx *ctx = hash_ctx;
+
+ if (ctx->sg_num >= MAX_SG_32) {
+ free(ctx);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_CAAM_64BIT
+ sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, (uint32_t)(addr >> 32));
+#else
+ sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, 0x0);
+#endif
+ sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_lo, (caam_dma_addr_t)addr);
+
+ sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
+ (size & SG_ENTRY_LENGTH_MASK));
+
+ ctx->sg_num++;
+
+ if (is_last) {
+ final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
+ SG_ENTRY_FINAL_BIT;
+ sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
+ }
+
+ return 0;
+}
+
+/*
+ * Perform progressive hashing on the given buffer and copy hash at
+ * destination buffer
+ *
+ * The context is freed after completion of hash operation.
+ *
+ * @hash_ctx: Pointer to the context for hashing
+ * @dest_buf: Pointer to the destination buffer where hash is to be copied
+ * @size: Size of the buffer being hashed
+ * @caam_algo: Enum for SHA1 or SHA256
+ * @return 0 if ok, -EINVAL on error
+ */
+static int caam_hash_finish(void *hash_ctx, void *dest_buf,
+ int size, enum caam_hash_algos caam_algo)
+{
+ uint32_t len = 0;
+ struct sha_ctx *ctx = hash_ctx;
+ int i = 0, ret = 0;
+
+ if (size < driver_hash[caam_algo].digestsize) {
+ free(ctx);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ctx->sg_num; i++)
+ len += (sec_in32(&ctx->sg_tbl[i].len_flag) &
+ SG_ENTRY_LENGTH_MASK);
+
+ inline_cnstr_jobdesc_hash(ctx->sha_desc, (uint8_t *)ctx->sg_tbl, len,
+ ctx->hash,
+ driver_hash[caam_algo].alg_type,
+ driver_hash[caam_algo].digestsize,
+ 1);
+
+ ret = run_descriptor_jr(ctx->sha_desc);
+
+ if (ret)
+ debug("Error %x\n", ret);
+ else
+ memcpy(dest_buf, ctx->hash, sizeof(ctx->hash));
+
+ free(ctx);
+ return ret;
+}
+
+int caam_hash(const unsigned char *pbuf, unsigned int buf_len,
+ unsigned char *pout, enum caam_hash_algos algo)
+{
+ int ret = 0;
+ uint32_t *desc;
+ unsigned int size;
+
+ desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
+ if (!desc) {
+ debug("Not enough memory for descriptor allocation\n");
+ return -ENOMEM;
+ }
+
+ if (!IS_ALIGNED((uintptr_t)pbuf, ARCH_DMA_MINALIGN) ||
+ !IS_ALIGNED((uintptr_t)pout, ARCH_DMA_MINALIGN)) {
+ puts("Error: Address arguments are not aligned\n");
+ free(desc); /* avoid leaking the descriptor on the error path */
+ return -EINVAL;
+ }
+
+ size = ALIGN(buf_len, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)pbuf, (unsigned long)pbuf + size);
+
+ inline_cnstr_jobdesc_hash(desc, pbuf, buf_len, pout,
+ driver_hash[algo].alg_type,
+ driver_hash[algo].digestsize,
+ 0);
+
+ size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)desc, (unsigned long)desc + size);
+
+ ret = run_descriptor_jr(desc);
+
+ size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN);
+ invalidate_dcache_range((unsigned long)pout,
+ (unsigned long)pout + size);
+
+ free(desc);
+ return ret;
+}
+
+void hw_sha256(const unsigned char *pbuf, unsigned int buf_len,
+ unsigned char *pout, unsigned int chunk_size)
+{
+ if (caam_hash(pbuf, buf_len, pout, SHA256))
+ printf("CAAM was not setup properly or it is faulty\n");
+}
+
+void hw_sha1(const unsigned char *pbuf, unsigned int buf_len,
+ unsigned char *pout, unsigned int chunk_size)
+{
+ if (caam_hash(pbuf, buf_len, pout, SHA1))
+ printf("CAAM was not setup properly or it is faulty\n");
+}
+
+int hw_sha_init(struct hash_algo *algo, void **ctxp)
+{
+ return caam_hash_init(ctxp, get_hash_type(algo));
+}
+
+int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
+ unsigned int size, int is_last)
+{
+ return caam_hash_update(ctx, buf, size, is_last, get_hash_type(algo));
+}
+
+int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
+ int size)
+{
+ return caam_hash_finish(ctx, dest_buf, size, get_hash_type(algo));
+}
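+
+/*
+ * Illustrative sketch (not part of the driver) of the progressive hashing
+ * flow wired up above; 'algo' would typically be obtained via
+ * hash_lookup_algo("sha256", &algo), and chunk1/chunk2 are hypothetical:
+ *
+ *	void *ctx;
+ *	u8 digest[SHA256_DIGEST_SIZE];
+ *
+ *	if (!hw_sha_init(algo, &ctx)) {
+ *		hw_sha_update(algo, ctx, chunk1, len1, 0);
+ *		hw_sha_update(algo, ctx, chunk2, len2, 1); // is_last = 1
+ *		hw_sha_finish(algo, ctx, digest, sizeof(digest));
+ *	}
+ */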
diff --git a/roms/u-boot/drivers/crypto/fsl/fsl_hash.h b/roms/u-boot/drivers/crypto/fsl/fsl_hash.h
new file mode 100644
index 000000000..82057c83c
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/fsl_hash.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef _SHA_H
+#define _SHA_H
+
+#include <fsl_sec.h>
+#include <hash.h>
+#include "jr.h"
+
+/* We support at most 32 Scatter/Gather Entries. */
+#define MAX_SG_32 32
+
+/*
+ * Hash context contains the following fields
+ * @sha_desc: Sha Descriptor
+ * @sg_num: number of entries in sg table
+ * @len: total length of buffer
+ * @sg_tbl: sg entry table
+ * @hash: index to the hash calculated
+ */
+struct sha_ctx {
+ uint32_t sha_desc[64];
+ uint32_t sg_num;
+ uint32_t len;
+ struct sg_entry sg_tbl[MAX_SG_32];
+ u8 hash[HASH_MAX_DIGEST_SIZE];
+};
+
+#endif
diff --git a/roms/u-boot/drivers/crypto/fsl/fsl_mfgprot.c b/roms/u-boot/drivers/crypto/fsl/fsl_mfgprot.c
new file mode 100644
index 000000000..29af79f57
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/fsl_mfgprot.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <common.h>
+#include <errno.h>
+#include <fsl_sec.h>
+#include <memalign.h>
+#include "desc.h"
+#include "desc_constr.h"
+#include "jobdesc.h"
+#include "jr.h"
+
+/* Size of MFG descriptor */
+#define MFG_PUBK_DSC_WORDS 4
+#define MFG_SIGN_DSC_WORDS 8
+
+static void mfg_build_sign_dsc(u32 *dsc_ptr, const u8 *m, int size,
+ u8 *dgst, u8 *c, u8 *d)
+{
+ u32 *dsc = dsc_ptr;
+ struct pdb_mp_sign *pdb;
+
+ init_job_desc_pdb(dsc, 0, sizeof(struct pdb_mp_sign));
+
+ pdb = (struct pdb_mp_sign *)desc_pdb(dsc);
+
+ /* Curve */
+ pdb->pdb_hdr = (PDB_MP_CSEL_P256);
+
+ /* Message Pointer */
+ pdb_add_ptr(&pdb->dma_addr_msg, virt_to_phys((void *)m));
+
+ /* mes-resp Pointer */
+ pdb_add_ptr(&pdb->dma_addr_hash, virt_to_phys((void *)dgst));
+
+ /* C Pointer */
+ pdb_add_ptr(&pdb->dma_addr_c_sig, virt_to_phys((void *)c));
+
+ /* d Pointer */
+ pdb_add_ptr(&pdb->dma_addr_d_sig, virt_to_phys((void *)d));
+
+ /* Message Size */
+ pdb->img_size = size;
+
+ /* MP PubK generate key command */
+ append_cmd(dsc, (CMD_OPERATION | OP_TYPE_DECAP_PROTOCOL |
+ OP_PCLID_MP_SIGN));
+}
+
+static void mfg_build_pubk_dsc(u32 *dsc_ptr, u8 *dst)
+{
+ u32 *dsc = dsc_ptr;
+ struct pdb_mp_pub_k *pdb;
+
+ init_job_desc_pdb(dsc, 0, sizeof(struct pdb_mp_pub_k));
+
+ pdb = (struct pdb_mp_pub_k *)desc_pdb(dsc);
+
+ /* Curve */
+ pdb->pdb_hdr = (PDB_MP_CSEL_P256);
+
+ /* Message Pointer */
+ pdb_add_ptr(&pdb->dma_pkey, virt_to_phys((void *)dst));
+
+ /* MP Sign key command */
+ append_cmd(dsc, (CMD_OPERATION | OP_TYPE_DECAP_PROTOCOL |
+ OP_PCLID_MP_PUB_KEY));
+}
+
+int gen_mppubk(u8 *dst)
+{
+ int size, ret;
+ u32 *dsc;
+
+ /* Job Descriptor initialization */
+ dsc = memalign(ARCH_DMA_MINALIGN,
+ sizeof(uint32_t) * MFG_PUBK_DSC_WORDS);
+ if (!dsc) {
+ debug("Not enough memory for descriptor allocation\n");
+ return -ENOMEM;
+ }
+
+ mfg_build_pubk_dsc(dsc, dst);
+
+ size = roundup(sizeof(uint32_t) * MFG_PUBK_DSC_WORDS,
+ ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)dsc, (unsigned long)dsc + size);
+
+ size = roundup(FSL_CAAM_MP_PUBK_BYTES, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)dst, (unsigned long)dst + size);
+
+ /* Execute Job Descriptor */
+ puts("\nGenerating Manufacturing Protection Public Key\n");
+
+ ret = run_descriptor_jr(dsc);
+ if (ret) {
+ debug("Error in public key generation %d\n", ret);
+ goto err;
+ }
+
+ size = roundup(FSL_CAAM_MP_PUBK_BYTES, ARCH_DMA_MINALIGN);
+ invalidate_dcache_range((unsigned long)dst, (unsigned long)dst + size);
+err:
+ free(dsc);
+ return ret;
+}
+
+int sign_mppubk(const u8 *m, int data_size, u8 *dgst, u8 *c, u8 *d)
+{
+ int size, ret;
+ u32 *dsc;
+
+ /* Job Descriptor initialization */
+ dsc = memalign(ARCH_DMA_MINALIGN,
+ sizeof(uint32_t) * MFG_SIGN_DSC_WORDS);
+ if (!dsc) {
+ debug("Not enough memory for descriptor allocation\n");
+ return -ENOMEM;
+ }
+
+ mfg_build_sign_dsc(dsc, m, data_size, dgst, c, d);
+
+ size = roundup(sizeof(uint32_t) * MFG_SIGN_DSC_WORDS,
+ ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)dsc, (unsigned long)dsc + size);
+
+ size = roundup(data_size, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)m, (unsigned long)m + size);
+
+ size = roundup(FSL_CAAM_MP_MES_DGST_BYTES, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)dgst, (unsigned long)dgst + size);
+
+ size = roundup(FSL_CAAM_MP_PRVK_BYTES, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)c, (unsigned long)c + size);
+ flush_dcache_range((unsigned long)d, (unsigned long)d + size);
+
+ /* Execute Job Descriptor */
+ puts("\nSigning message with Manufacturing Protection Private Key\n");
+
+ ret = run_descriptor_jr(dsc);
+ if (ret) {
+ debug("Error in public key generation %d\n", ret);
+ goto err;
+ }
+
+ size = roundup(FSL_CAAM_MP_MES_DGST_BYTES, ARCH_DMA_MINALIGN);
+ invalidate_dcache_range((unsigned long)dgst,
+ (unsigned long)dgst + size);
+
+ size = roundup(FSL_CAAM_MP_PRVK_BYTES, ARCH_DMA_MINALIGN);
+ invalidate_dcache_range((unsigned long)c, (unsigned long)c + size);
+ invalidate_dcache_range((unsigned long)d, (unsigned long)d + size);
+
+err:
+ free(dsc);
+ return ret;
+}
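+
+/*
+ * Illustrative usage sketch (not part of the driver); 'msg' and 'msg_len'
+ * are hypothetical, and the buffer sizes follow the cache-maintenance code
+ * above. In practice the buffers should also be cache-line aligned:
+ *
+ *	u8 pubk[FSL_CAAM_MP_PUBK_BYTES];
+ *	u8 dgst[FSL_CAAM_MP_MES_DGST_BYTES];
+ *	u8 c[FSL_CAAM_MP_PRVK_BYTES], d[FSL_CAAM_MP_PRVK_BYTES];
+ *
+ *	if (!gen_mppubk(pubk))
+ *		sign_mppubk(msg, msg_len, dgst, c, d);
+ */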
diff --git a/roms/u-boot/drivers/crypto/fsl/fsl_rsa.c b/roms/u-boot/drivers/crypto/fsl/fsl_rsa.c
new file mode 100644
index 000000000..897ee855e
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/fsl_rsa.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014 Freescale Semiconductor, Inc.
+ * Author: Ruchika Gupta <ruchika.gupta@freescale.com>
+ */
+
+#include <config.h>
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <asm/types.h>
+#include <malloc.h>
+#include "jobdesc.h"
+#include "desc.h"
+#include "jr.h"
+#include "rsa_caam.h"
+#include <u-boot/rsa-mod-exp.h>
+
+int fsl_mod_exp(struct udevice *dev, const uint8_t *sig, uint32_t sig_len,
+ struct key_prop *prop, uint8_t *out)
+{
+ uint32_t keylen;
+ struct pk_in_params pkin;
+ uint32_t desc[MAX_CAAM_DESCSIZE];
+ int ret;
+
+ /* Length in bytes */
+ keylen = prop->num_bits / 8;
+
+ pkin.a = sig;
+ pkin.a_siz = sig_len;
+ pkin.n = prop->modulus;
+ pkin.n_siz = keylen;
+ pkin.e = prop->public_exponent;
+ pkin.e_siz = prop->exp_len;
+
+ inline_cnstr_jobdesc_pkha_rsaexp(desc, &pkin, out, sig_len);
+
+ ret = run_descriptor_jr(desc);
+ if (ret) {
+ debug("%s: RSA failed to verify: %d\n", __func__, ret);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static const struct mod_exp_ops fsl_mod_exp_ops = {
+ .mod_exp = fsl_mod_exp,
+};
+
+U_BOOT_DRIVER(fsl_rsa_mod_exp) = {
+ .name = "fsl_rsa_mod_exp",
+ .id = UCLASS_MOD_EXP,
+ .ops = &fsl_mod_exp_ops,
+};
+
+U_BOOT_DRVINFO(fsl_rsa) = {
+ .name = "fsl_rsa_mod_exp",
+};
diff --git a/roms/u-boot/drivers/crypto/fsl/jobdesc.c b/roms/u-boot/drivers/crypto/fsl/jobdesc.c
new file mode 100644
index 000000000..d23541553
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/jobdesc.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SEC Descriptor Construction Library
+ * Basic job descriptor construction
+ *
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
+ *
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <fsl_sec.h>
+#include "desc_constr.h"
+#include "jobdesc.h"
+#include "rsa_caam.h"
+#include <asm/cache.h>
+
+#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_MX7ULP) || \
+ defined(CONFIG_IMX8M)
+/*!
+ * Secure memory run command
+ *
+ * @param sec_mem_cmd Secure memory command register
+ * @return cmd_status Secure memory command status register
+ */
+uint32_t secmem_set_cmd(uint32_t sec_mem_cmd)
+{
+ uint32_t temp_reg;
+
+ ccsr_sec_t *sec = (void *)CONFIG_SYS_FSL_SEC_ADDR;
+ uint32_t sm_vid = SM_VERSION(sec_in32(&sec->smvid));
+ uint32_t jr_id = 0;
+
+ sec_out32(CAAM_SMCJR(sm_vid, jr_id), sec_mem_cmd);
+
+ do {
+ temp_reg = sec_in32(CAAM_SMCSJR(sm_vid, jr_id));
+ } while (temp_reg & CMD_COMPLETE);
+
+ return temp_reg;
+}
+
+/*!
+ * CAAM page allocation:
+ * Allocates a partition from secure memory, with the id
+ * equal to partition_num. This will de-allocate the page
+ * if it is already allocated. The partition will have
+ * full access permissions. The permissions are set before
+ * running a job descriptor. A memory page of secure RAM
+ * is allocated for the partition.
+ *
+ * @param page_num Number of the page to allocate.
+ * @param partition_num Number of the partition to allocate.
+ * @return 0 on success, ERROR_IN_PAGE_ALLOC otherwise
+ */
+int caam_page_alloc(uint8_t page_num, uint8_t partition_num)
+{
+ uint32_t temp_reg;
+
+ ccsr_sec_t *sec = (void *)CONFIG_SYS_FSL_SEC_ADDR;
+ uint32_t sm_vid = SM_VERSION(sec_in32(&sec->smvid));
+ uint32_t jr_id = 0;
+
+ /*
+ * De-Allocate partition_num if already allocated to ARM core
+ */
+ if (sec_in32(CAAM_SMPO_0) & PARTITION_OWNER(partition_num)) {
+ temp_reg = secmem_set_cmd(PARTITION(partition_num) |
+ CMD_PART_DEALLOC);
+ if (temp_reg & SMCSJR_AERR) {
+ printf("Error: De-allocation status 0x%X\n", temp_reg);
+ return ERROR_IN_PAGE_ALLOC;
+ }
+ }
+
+ /* set the access rights to allow full access */
+ sec_out32(CAAM_SMAG1JR(sm_vid, jr_id, partition_num), 0xF);
+ sec_out32(CAAM_SMAG2JR(sm_vid, jr_id, partition_num), 0xF);
+ sec_out32(CAAM_SMAPJR(sm_vid, jr_id, partition_num), 0xFF);
+
+ /* Now need to allocate partition_num of secure RAM. */
+ /* De-Allocate page_num by starting with a page inquiry command */
+ temp_reg = secmem_set_cmd(PAGE(page_num) | CMD_INQUIRY);
+
+ /* if the page is owned, de-allocate it */
+ if ((temp_reg & SMCSJR_PO) == PAGE_OWNED) {
+ temp_reg = secmem_set_cmd(PAGE(page_num) | CMD_PAGE_DEALLOC);
+ if (temp_reg & SMCSJR_AERR) {
+ printf("Error: Allocation status 0x%X\n", temp_reg);
+ return ERROR_IN_PAGE_ALLOC;
+ }
+ }
+
+ /* Allocate page_num to partition_num */
+ temp_reg = secmem_set_cmd(PAGE(page_num) | PARTITION(partition_num)
+ | CMD_PAGE_ALLOC);
+ if (temp_reg & SMCSJR_AERR) {
+ printf("Error: Allocation status 0x%X\n", temp_reg);
+ return ERROR_IN_PAGE_ALLOC;
+ }
+ /* page inquiry command to ensure that the page was allocated */
+ temp_reg = secmem_set_cmd(PAGE(page_num) | CMD_INQUIRY);
+
+ /* if the page is not owned => problem */
+ if ((temp_reg & SMCSJR_PO) != PAGE_OWNED) {
+ printf("Allocation of page %u in partition %u failed 0x%X\n",
+ page_num, partition_num, temp_reg);
+
+ return ERROR_IN_PAGE_ALLOC;
+ }
+
+ return 0;
+}
+
+int inline_cnstr_jobdesc_blob_dek(uint32_t *desc, const uint8_t *plain_txt,
+ uint8_t *dek_blob, uint32_t in_sz)
+{
+ ccsr_sec_t *sec = (void *)CONFIG_SYS_FSL_SEC_ADDR;
+ uint32_t sm_vid = SM_VERSION(sec_in32(&sec->smvid));
+ uint32_t jr_id = 0;
+
+ uint32_t ret = 0;
+ u32 aad_w1, aad_w2;
+ /* The output blob has a 32-byte key blob at the beginning and a
+ * 16-byte HMAC identifier at the end of the data blob */
+ uint32_t out_sz = in_sz + KEY_BLOB_SIZE + MAC_SIZE;
+ /* Setting HDR for blob */
+ uint8_t wrapped_key_hdr[8] = {HDR_TAG, 0x00, WRP_HDR_SIZE + out_sz,
+ HDR_PAR, HAB_MOD, HAB_ALG, in_sz, HAB_FLG};
+
+ /* initialize the blob array */
+ memset(dek_blob, 0, out_sz + 8);
+ /* Copy the header into the DEK blob buffer */
+ memcpy(dek_blob, wrapped_key_hdr, sizeof(wrapped_key_hdr));
+
+ /* allocating secure memory */
+ ret = caam_page_alloc(PAGE_1, PARTITION_1);
+ if (ret)
+ return ret;
+
+ /* Write DEK to secure memory */
+ memcpy((uint32_t *)SEC_MEM_PAGE1, (uint32_t *)plain_txt, in_sz);
+
+ unsigned long start = (unsigned long)SEC_MEM_PAGE1 &
+ ~(ARCH_DMA_MINALIGN - 1);
+ unsigned long end = ALIGN(start + 0x1000, ARCH_DMA_MINALIGN);
+ flush_dcache_range(start, end);
+
+ /* Now configure the access rights of the partition */
+ sec_out32(CAAM_SMAG1JR(sm_vid, jr_id, PARTITION_1), KS_G1);
+ sec_out32(CAAM_SMAG2JR(sm_vid, jr_id, PARTITION_1), 0);
+ sec_out32(CAAM_SMAPJR(sm_vid, jr_id, PARTITION_1), PERM);
+
+ /* construct aad for AES */
+ aad_w1 = (in_sz << OP_ALG_ALGSEL_SHIFT) | KEY_AES_SRC | LD_CCM_MODE;
+ aad_w2 = 0x0;
+
+ init_job_desc(desc, 0);
+
+ append_cmd(desc, CMD_LOAD | CLASS_2 | KEY_IMM | KEY_ENC |
+ (0x0c << LDST_OFFSET_SHIFT) | 0x08);
+
+ append_u32(desc, aad_w1);
+
+ append_u32(desc, aad_w2);
+
+ append_cmd_ptr(desc, (caam_dma_addr_t)SEC_MEM_PAGE1, in_sz, CMD_SEQ_IN_PTR);
+
+ append_cmd_ptr(desc, (caam_dma_addr_t)(ulong)(dek_blob + 8), out_sz, CMD_SEQ_OUT_PTR);
+
+ append_operation(desc, OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB |
+ OP_PCLID_SECMEM);
+
+ return ret;
+}
+#endif
+
+void inline_cnstr_jobdesc_hash(uint32_t *desc,
+ const uint8_t *msg, uint32_t msgsz, uint8_t *digest,
+ u32 alg_type, uint32_t alg_size, int sg_tbl)
+{
+ /* Digest length in bytes, e.g. 32 for SHA-256 */
+ uint32_t storelen = alg_size;
+ u32 options;
+ caam_dma_addr_t dma_addr_in, dma_addr_out;
+
+ dma_addr_in = virt_to_phys((void *)msg);
+ dma_addr_out = virt_to_phys((void *)digest);
+
+ init_job_desc(desc, 0);
+ append_operation(desc, OP_TYPE_CLASS2_ALG |
+ OP_ALG_AAI_HASH | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT | OP_ALG_ICV_OFF | alg_type);
+
+ options = LDST_CLASS_2_CCB | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2;
+ if (sg_tbl)
+ options |= FIFOLDST_SGF;
+ if (msgsz > 0xffff) {
+ options |= FIFOLDST_EXT;
+ append_fifo_load(desc, dma_addr_in, 0, options);
+ append_cmd(desc, msgsz);
+ } else {
+ append_fifo_load(desc, dma_addr_in, msgsz, options);
+ }
+
+ append_store(desc, dma_addr_out, storelen,
+ LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT);
+}
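+
+/*
+ * Illustrative sketch (not part of the driver): a one-shot SHA-256 over a
+ * flat buffer, as done by caam_hash() in fsl_hash.c (cache maintenance of
+ * msg, desc and digest omitted here for brevity):
+ *
+ *	u32 desc[MAX_CAAM_DESCSIZE];
+ *
+ *	inline_cnstr_jobdesc_hash(desc, msg, msg_len, digest,
+ *				  OP_ALG_ALGSEL_SHA256, 32, 0);
+ *	ret = run_descriptor_jr(desc);
+ */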
+#ifndef CONFIG_SPL_BUILD
+void inline_cnstr_jobdesc_blob_encap(uint32_t *desc, uint8_t *key_idnfr,
+ uint8_t *plain_txt, uint8_t *enc_blob,
+ uint32_t in_sz)
+{
+ caam_dma_addr_t dma_addr_key_idnfr, dma_addr_in, dma_addr_out;
+ uint32_t key_sz = KEY_IDNFR_SZ_BYTES;
+ /* The output blob has a 32-byte key blob at the beginning and a
+ * 16-byte HMAC identifier at the end of the data blob */
+ uint32_t out_sz = in_sz + KEY_BLOB_SIZE + MAC_SIZE;
+
+ dma_addr_key_idnfr = virt_to_phys((void *)key_idnfr);
+ dma_addr_in = virt_to_phys((void *)plain_txt);
+ dma_addr_out = virt_to_phys((void *)enc_blob);
+
+ init_job_desc(desc, 0);
+
+ append_key(desc, dma_addr_key_idnfr, key_sz, CLASS_2);
+
+ append_seq_in_ptr(desc, dma_addr_in, in_sz, 0);
+
+ append_seq_out_ptr(desc, dma_addr_out, out_sz, 0);
+
+ append_operation(desc, OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB);
+}
+
+void inline_cnstr_jobdesc_blob_decap(uint32_t *desc, uint8_t *key_idnfr,
+ uint8_t *enc_blob, uint8_t *plain_txt,
+ uint32_t out_sz)
+{
+ caam_dma_addr_t dma_addr_key_idnfr, dma_addr_in, dma_addr_out;
+ uint32_t key_sz = KEY_IDNFR_SZ_BYTES;
+ uint32_t in_sz = out_sz + KEY_BLOB_SIZE + MAC_SIZE;
+
+ dma_addr_key_idnfr = virt_to_phys((void *)key_idnfr);
+ dma_addr_in = virt_to_phys((void *)enc_blob);
+ dma_addr_out = virt_to_phys((void *)plain_txt);
+
+ init_job_desc(desc, 0);
+
+ append_key(desc, dma_addr_key_idnfr, key_sz, CLASS_2);
+
+ append_seq_in_ptr(desc, dma_addr_in, in_sz, 0);
+
+ append_seq_out_ptr(desc, dma_addr_out, out_sz, 0);
+
+ append_operation(desc, OP_TYPE_DECAP_PROTOCOL | OP_PCLID_BLOB);
+}
+#endif
+/*
+ * Descriptor to instantiate RNG State Handle 0 in normal mode and
+ * load the JDKEK, TDKEK and TDSK registers
+ */
+void inline_cnstr_jobdesc_rng_instantiation(u32 *desc, int handle, int do_sk)
+{
+ u32 *jump_cmd;
+
+ init_job_desc(desc, 0);
+
+ /* INIT RNG in non-test mode */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT |
+ OP_ALG_PR_ON);
+
+ /* For SH0, Secure Keys must be generated as well */
+ if (!handle && do_sk) {
+ /* wait for done */
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /*
+ * load 1 to clear written reg:
+ * resets the done interrupt and returns the RNG to idle.
+ */
+ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+ /* generate secure keys (non-test) */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_RNG4_SK);
+ }
+}
+
+/* Descriptor for deinstantiation of the RNG block. */
+void inline_cnstr_jobdesc_rng_deinstantiation(u32 *desc, int handle)
+{
+ init_job_desc(desc, 0);
+
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
+}
+
+void inline_cnstr_jobdesc_rng(u32 *desc, void *data_out, u32 size)
+{
+ dma_addr_t dma_data_out = virt_to_phys(data_out);
+
+ init_job_desc(desc, 0);
+ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
+ OP_ALG_PR_ON);
+ append_fifo_store(desc, dma_data_out, size, FIFOST_TYPE_RNGSTORE);
+}
+
+/* Change key size from bits to bytes in the calling function */
+void inline_cnstr_jobdesc_pkha_rsaexp(uint32_t *desc,
+ struct pk_in_params *pkin, uint8_t *out,
+ uint32_t out_siz)
+{
+ caam_dma_addr_t dma_addr_e, dma_addr_a, dma_addr_n, dma_addr_out;
+
+ dma_addr_e = virt_to_phys((void *)pkin->e);
+ dma_addr_a = virt_to_phys((void *)pkin->a);
+ dma_addr_n = virt_to_phys((void *)pkin->n);
+ dma_addr_out = virt_to_phys((void *)out);
+
+ init_job_desc(desc, 0);
+ append_key(desc, dma_addr_e, pkin->e_siz, KEY_DEST_PKHA_E | CLASS_1);
+
+ append_fifo_load(desc, dma_addr_a,
+ pkin->a_siz, LDST_CLASS_1_CCB | FIFOLD_TYPE_PK_A);
+
+ append_fifo_load(desc, dma_addr_n,
+ pkin->n_siz, LDST_CLASS_1_CCB | FIFOLD_TYPE_PK_N);
+
+ append_operation(desc, OP_TYPE_PK | OP_ALG_PK | OP_ALG_PKMODE_MOD_EXPO);
+
+ append_fifo_store(desc, dma_addr_out, out_siz,
+ LDST_CLASS_1_CCB | FIFOST_TYPE_PKHA_B);
+}
diff --git a/roms/u-boot/drivers/crypto/fsl/jobdesc.h b/roms/u-boot/drivers/crypto/fsl/jobdesc.h
new file mode 100644
index 000000000..c4501abd2
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/jobdesc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __JOBDESC_H
+#define __JOBDESC_H
+
+#include <common.h>
+#include <asm/io.h>
+#include "rsa_caam.h"
+
+#define KEY_IDNFR_SZ_BYTES 16
+
+#ifdef CONFIG_CMD_DEKBLOB
+/* inline_cnstr_jobdesc_blob_dek:
+ * Initializes and constructs the job descriptor for DEK encapsulation
+ * using the given parameters.
+ * @desc: reference to the job descriptor
+ * @plain_txt: reference to the DEK
+ * @enc_blob: reference where to store the blob
+ * @in_sz: size in bytes of the DEK
+ * @return: 0 on success, ECONSTRJDESC otherwise
+ */
+int inline_cnstr_jobdesc_blob_dek(uint32_t *desc, const uint8_t *plain_txt,
+ uint8_t *enc_blob, uint32_t in_sz);
+#endif
+
+void inline_cnstr_jobdesc_hash(uint32_t *desc,
+ const uint8_t *msg, uint32_t msgsz, uint8_t *digest,
+ u32 alg_type, uint32_t alg_size, int sg_tbl);
+
+void inline_cnstr_jobdesc_blob_encap(uint32_t *desc, uint8_t *key_idnfr,
+ uint8_t *plain_txt, uint8_t *enc_blob,
+ uint32_t in_sz);
+
+void inline_cnstr_jobdesc_blob_decap(uint32_t *desc, uint8_t *key_idnfr,
+ uint8_t *enc_blob, uint8_t *plain_txt,
+ uint32_t out_sz);
+
+void inline_cnstr_jobdesc_rng_instantiation(u32 *desc, int handle, int do_sk);
+
+void inline_cnstr_jobdesc_rng_deinstantiation(u32 *desc, int handle);
+
+void inline_cnstr_jobdesc_rng(u32 *desc, void *data_out, u32 size);
+
+void inline_cnstr_jobdesc_pkha_rsaexp(uint32_t *desc,
+ struct pk_in_params *pkin, uint8_t *out,
+ uint32_t out_siz);
+
+#endif
diff --git a/roms/u-boot/drivers/crypto/fsl/jr.c b/roms/u-boot/drivers/crypto/fsl/jr.c
new file mode 100644
index 000000000..22b649219
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/jr.c
@@ -0,0 +1,745 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2008-2014 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
+ *
+ * Based on CAAM driver in drivers/crypto/caam in Linux
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <linux/kernel.h>
+#include <log.h>
+#include <malloc.h>
+#include "fsl_sec.h"
+#include "jr.h"
+#include "jobdesc.h"
+#include "desc_constr.h"
+#include <time.h>
+#include <asm/cache.h>
+#ifdef CONFIG_FSL_CORENET
+#include <asm/cache.h>
+#include <asm/fsl_pamu.h>
+#endif
+#include <dm/lists.h>
+#include <linux/delay.h>
+
+#define CIRC_CNT(head, tail, size) (((head) - (tail)) & (size - 1))
+#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), (head) + 1, (size))
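+
+/*
+ * Worked example (illustrative only): with size = JR_SIZE = 4, head = 1 and
+ * tail = 3, CIRC_CNT(1, 3, 4) = (1 - 3) & 3 = 2 jobs are in flight, and
+ * CIRC_SPACE(1, 3, 4) = CIRC_CNT(3, 2, 4) = 1 slot is still free; one slot
+ * is always kept unused so that head == tail unambiguously means "empty".
+ */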
+
+uint32_t sec_offset[CONFIG_SYS_FSL_MAX_NUM_OF_SEC] = {
+ 0,
+#if defined(CONFIG_ARCH_C29X)
+ CONFIG_SYS_FSL_SEC_IDX_OFFSET,
+ 2 * CONFIG_SYS_FSL_SEC_IDX_OFFSET
+#endif
+};
+
+#define SEC_ADDR(idx) \
+ (ulong)((CONFIG_SYS_FSL_SEC_ADDR + sec_offset[idx]))
+
+#define SEC_JR0_ADDR(idx) \
+ (ulong)(SEC_ADDR(idx) + \
+ (CONFIG_SYS_FSL_JR0_OFFSET - CONFIG_SYS_FSL_SEC_OFFSET))
+
+struct jobring jr0[CONFIG_SYS_FSL_MAX_NUM_OF_SEC];
+
+static inline void start_jr0(uint8_t sec_idx)
+{
+ ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
+ u32 ctpr_ms = sec_in32(&sec->ctpr_ms);
+ u32 scfgr = sec_in32(&sec->scfgr);
+
+ if (ctpr_ms & SEC_CTPR_MS_VIRT_EN_INCL) {
+ /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+ * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SEC_SCFGR_VIRT_EN = 1
+ */
+ if ((ctpr_ms & SEC_CTPR_MS_VIRT_EN_POR) ||
+ (scfgr & SEC_SCFGR_VIRT_EN))
+ sec_out32(&sec->jrstartr, CONFIG_JRSTARTR_JR0);
+ } else {
+ /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
+ if (ctpr_ms & SEC_CTPR_MS_VIRT_EN_POR)
+ sec_out32(&sec->jrstartr, CONFIG_JRSTARTR_JR0);
+ }
+}
+
+static inline void jr_reset_liodn(uint8_t sec_idx)
+{
+ ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
+ sec_out32(&sec->jrliodnr[0].ls, 0);
+}
+
+static inline void jr_disable_irq(uint8_t sec_idx)
+{
+ struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
+ uint32_t jrcfg = sec_in32(&regs->jrcfg1);
+
+ jrcfg = jrcfg | JR_INTMASK;
+
+ sec_out32(&regs->jrcfg1, jrcfg);
+}
+
+static void jr_initregs(uint8_t sec_idx)
+{
+ struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
+ struct jobring *jr = &jr0[sec_idx];
+ caam_dma_addr_t ip_base = virt_to_phys((void *)jr->input_ring);
+ caam_dma_addr_t op_base = virt_to_phys((void *)jr->output_ring);
+
+#ifdef CONFIG_CAAM_64BIT
+ sec_out32(&regs->irba_h, ip_base >> 32);
+#else
+ sec_out32(&regs->irba_h, 0x0);
+#endif
+ sec_out32(&regs->irba_l, (uint32_t)ip_base);
+#ifdef CONFIG_CAAM_64BIT
+ sec_out32(&regs->orba_h, op_base >> 32);
+#else
+ sec_out32(&regs->orba_h, 0x0);
+#endif
+ sec_out32(&regs->orba_l, (uint32_t)op_base);
+ sec_out32(&regs->ors, JR_SIZE);
+ sec_out32(&regs->irs, JR_SIZE);
+
+ if (!jr->irq)
+ jr_disable_irq(sec_idx);
+}
+
+static int jr_init(uint8_t sec_idx)
+{
+ struct jobring *jr = &jr0[sec_idx];
+
+ memset(jr, 0, sizeof(struct jobring));
+
+ jr->jq_id = DEFAULT_JR_ID;
+ jr->irq = DEFAULT_IRQ;
+
+#ifdef CONFIG_FSL_CORENET
+ jr->liodn = DEFAULT_JR_LIODN;
+#endif
+ jr->size = JR_SIZE;
+ jr->input_ring = (caam_dma_addr_t *)memalign(ARCH_DMA_MINALIGN,
+ JR_SIZE * sizeof(caam_dma_addr_t));
+ if (!jr->input_ring)
+ return -1;
+
+ jr->op_size = roundup(JR_SIZE * sizeof(struct op_ring),
+ ARCH_DMA_MINALIGN);
+ jr->output_ring =
+ (struct op_ring *)memalign(ARCH_DMA_MINALIGN, jr->op_size);
+ if (!jr->output_ring)
+ return -1;
+
+ memset(jr->input_ring, 0, JR_SIZE * sizeof(caam_dma_addr_t));
+ memset(jr->output_ring, 0, jr->op_size);
+
+ start_jr0(sec_idx);
+
+ jr_initregs(sec_idx);
+
+ return 0;
+}
+
+static int jr_sw_cleanup(uint8_t sec_idx)
+{
+ struct jobring *jr = &jr0[sec_idx];
+
+ jr->head = 0;
+ jr->tail = 0;
+ jr->read_idx = 0;
+ jr->write_idx = 0;
+ memset(jr->info, 0, sizeof(jr->info));
+ memset(jr->input_ring, 0, jr->size * sizeof(caam_dma_addr_t));
+ memset(jr->output_ring, 0, jr->size * sizeof(struct op_ring));
+
+ return 0;
+}
+
+static int jr_hw_reset(uint8_t sec_idx)
+{
+ struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
+ uint32_t timeout = 100000;
+ uint32_t jrint, jrcr;
+
+ sec_out32(&regs->jrcr, JRCR_RESET);
+ do {
+ jrint = sec_in32(&regs->jrint);
+ } while (((jrint & JRINT_ERR_HALT_MASK) ==
+ JRINT_ERR_HALT_INPROGRESS) && --timeout);
+
+ jrint = sec_in32(&regs->jrint);
+ if (((jrint & JRINT_ERR_HALT_MASK) !=
+ JRINT_ERR_HALT_INPROGRESS) && timeout == 0)
+ return -1;
+
+ timeout = 100000;
+ sec_out32(&regs->jrcr, JRCR_RESET);
+ do {
+ jrcr = sec_in32(&regs->jrcr);
+ } while ((jrcr & JRCR_RESET) && --timeout);
+
+ if (timeout == 0)
+ return -1;
+
+ return 0;
+}
+
+/* -1 --- error, can't enqueue -- no space available */
+static int jr_enqueue(uint32_t *desc_addr,
+ void (*callback)(uint32_t status, void *arg),
+ void *arg, uint8_t sec_idx)
+{
+ struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
+ struct jobring *jr = &jr0[sec_idx];
+ int head = jr->head;
+ uint32_t desc_word;
+ int length = desc_len(desc_addr);
+ int i;
+#ifdef CONFIG_CAAM_64BIT
+ uint32_t *addr_hi, *addr_lo;
+#endif
+
+ /* The descriptor must be submitted to the SEC block in the SEC
+ * block's endianness.
+ * So, if the endianness of the core and the SEC block differ, each
+ * word of the descriptor is byte-swapped.
+ */
+ for (i = 0; i < length; i++) {
+ desc_word = desc_addr[i];
+ sec_out32((uint32_t *)&desc_addr[i], desc_word);
+ }
+
+ caam_dma_addr_t desc_phys_addr = virt_to_phys(desc_addr);
+
+ jr->info[head].desc_phys_addr = desc_phys_addr;
+ jr->info[head].callback = (void *)callback;
+ jr->info[head].arg = arg;
+ jr->info[head].op_done = 0;
+
+ unsigned long start = (unsigned long)&jr->info[head] &
+ ~(ARCH_DMA_MINALIGN - 1);
+ unsigned long end = ALIGN((unsigned long)&jr->info[head] +
+ sizeof(struct jr_info), ARCH_DMA_MINALIGN);
+ flush_dcache_range(start, end);
+
+#ifdef CONFIG_CAAM_64BIT
+ /* Write the 64-bit descriptor address to the input ring.
+ * The 32-bit high and low parts of the address depend on the
+ * endianness of the SEC block.
+ */
+#ifdef CONFIG_SYS_FSL_SEC_LE
+ addr_lo = (uint32_t *)(&jr->input_ring[head]);
+ addr_hi = (uint32_t *)(&jr->input_ring[head]) + 1;
+#elif defined(CONFIG_SYS_FSL_SEC_BE)
+ addr_hi = (uint32_t *)(&jr->input_ring[head]);
+ addr_lo = (uint32_t *)(&jr->input_ring[head]) + 1;
+#endif /* ifdef CONFIG_SYS_FSL_SEC_LE */
+
+ sec_out32(addr_hi, (uint32_t)(desc_phys_addr >> 32));
+ sec_out32(addr_lo, (uint32_t)(desc_phys_addr));
+
+#else
+ /* Write the 32 bit Descriptor address on Input Ring. */
+ sec_out32(&jr->input_ring[head], desc_phys_addr);
+#endif /* ifdef CONFIG_CAAM_64BIT */
+
+ start = (unsigned long)&jr->input_ring[head] & ~(ARCH_DMA_MINALIGN - 1);
+ end = ALIGN((unsigned long)&jr->input_ring[head] +
+ sizeof(caam_dma_addr_t), ARCH_DMA_MINALIGN);
+ flush_dcache_range(start, end);
+
+ jr->head = (head + 1) & (jr->size - 1);
+
+ /* Invalidate output ring */
+ start = (unsigned long)jr->output_ring &
+ ~(ARCH_DMA_MINALIGN - 1);
+ end = ALIGN((unsigned long)jr->output_ring + jr->op_size,
+ ARCH_DMA_MINALIGN);
+ invalidate_dcache_range(start, end);
+
+ sec_out32(&regs->irja, 1);
+
+ return 0;
+}
+
+static int jr_dequeue(int sec_idx)
+{
+ struct jr_regs *regs = (struct jr_regs *)SEC_JR0_ADDR(sec_idx);
+ struct jobring *jr = &jr0[sec_idx];
+ int head = jr->head;
+ int tail = jr->tail;
+ int idx, i, found;
+ void (*callback)(uint32_t status, void *arg);
+ void *arg = NULL;
+#ifdef CONFIG_CAAM_64BIT
+ uint32_t *addr_hi, *addr_lo;
+#else
+ uint32_t *addr;
+#endif
+
+ while (sec_in32(&regs->orsf) && CIRC_CNT(jr->head, jr->tail,
+ jr->size)) {
+
+ found = 0;
+
+ caam_dma_addr_t op_desc;
+ #ifdef CONFIG_CAAM_64BIT
+ /* Read the 64-bit descriptor address from the output ring.
+ * The 32-bit high and low parts of the address depend on the
+ * endianness of the SEC block.
+ */
+ #ifdef CONFIG_SYS_FSL_SEC_LE
+ addr_lo = (uint32_t *)(&jr->output_ring[jr->tail].desc);
+ addr_hi = (uint32_t *)(&jr->output_ring[jr->tail].desc) + 1;
+ #elif defined(CONFIG_SYS_FSL_SEC_BE)
+ addr_hi = (uint32_t *)(&jr->output_ring[jr->tail].desc);
+ addr_lo = (uint32_t *)(&jr->output_ring[jr->tail].desc) + 1;
+ #endif /* ifdef CONFIG_SYS_FSL_SEC_LE */
+
+ op_desc = ((u64)sec_in32(addr_hi) << 32) |
+ ((u64)sec_in32(addr_lo));
+
+ #else
+ /* Read the 32 bit Descriptor address from Output Ring. */
+ addr = (uint32_t *)&jr->output_ring[jr->tail].desc;
+ op_desc = sec_in32(addr);
+ #endif /* ifdef CONFIG_CAAM_64BIT */
+
+ uint32_t status = sec_in32(&jr->output_ring[jr->tail].status);
+
+ for (i = 0; CIRC_CNT(head, tail + i, jr->size) >= 1; i++) {
+ idx = (tail + i) & (jr->size - 1);
+ if (op_desc == jr->info[idx].desc_phys_addr) {
+ found = 1;
+ break;
+ }
+ }
+
+ /* Error condition if match not found */
+ if (!found)
+ return -1;
+
+ jr->info[idx].op_done = 1;
+ callback = (void *)jr->info[idx].callback;
+ arg = jr->info[idx].arg;
+
+ /* When the job at the tail index is done, increment
+ * tail until all jobs that completed out of order have
+ * been taken into account
+ */
+ if (idx == tail)
+ do {
+ tail = (tail + 1) & (jr->size - 1);
+ } while (jr->info[tail].op_done);
+
+ jr->tail = tail;
+ jr->read_idx = (jr->read_idx + 1) & (jr->size - 1);
+
+ sec_out32(&regs->orjr, 1);
+ jr->info[idx].op_done = 0;
+
+ callback(status, arg);
+ }
+
+ return 0;
+}
+
+static void desc_done(uint32_t status, void *arg)
+{
+ struct result *x = arg;
+ x->status = status;
+#ifndef CONFIG_SPL_BUILD
+ caam_jr_strstatus(status);
+#endif
+ x->done = 1;
+}
+
+static inline int run_descriptor_jr_idx(uint32_t *desc, uint8_t sec_idx)
+{
+ unsigned long long timeval = 0;
+ unsigned long long timeout = CONFIG_USEC_DEQ_TIMEOUT;
+ struct result op;
+ int ret = 0;
+
+ memset(&op, 0, sizeof(op));
+
+ ret = jr_enqueue(desc, desc_done, &op, sec_idx);
+ if (ret) {
+ debug("Error in SEC enq\n");
+ ret = JQ_ENQ_ERR;
+ goto out;
+ }
+
+ while (op.done != 1) {
+ udelay(1);
+ timeval += 1;
+
+ ret = jr_dequeue(sec_idx);
+ if (ret) {
+ debug("Error in SEC deq\n");
+ ret = JQ_DEQ_ERR;
+ goto out;
+ }
+
+ if (timeval > timeout) {
+ debug("SEC Dequeue timed out\n");
+ ret = JQ_DEQ_TO_ERR;
+ goto out;
+ }
+ }
+
+ if (op.status) {
+ debug("Error %x\n", op.status);
+ ret = op.status;
+ }
+out:
+ return ret;
+}
+
+int run_descriptor_jr(uint32_t *desc)
+{
+ return run_descriptor_jr_idx(desc, 0);
+}
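+
+/*
+ * Illustrative sketch (not part of the driver): callers build a descriptor,
+ * flush it to memory and submit it to the default job ring, as caam_hash()
+ * and the blob helpers do; run_descriptor_jr() then busy-polls the output
+ * ring for up to CONFIG_USEC_DEQ_TIMEOUT microseconds. Here
+ * desc_size_aligned is a placeholder for the cache-aligned descriptor size:
+ *
+ *	flush_dcache_range((unsigned long)desc,
+ *			   (unsigned long)desc + desc_size_aligned);
+ *	ret = run_descriptor_jr(desc);
+ */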
+
+static inline int jr_reset_sec(uint8_t sec_idx)
+{
+ if (jr_hw_reset(sec_idx) < 0)
+ return -1;
+
+ /* Clean up the jobring structure maintained by software */
+ jr_sw_cleanup(sec_idx);
+
+ return 0;
+}
+
+int jr_reset(void)
+{
+ return jr_reset_sec(0);
+}
+
+static inline int sec_reset_idx(uint8_t sec_idx)
+{
+ ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
+ uint32_t mcfgr = sec_in32(&sec->mcfgr);
+ uint32_t timeout = 100000;
+
+ mcfgr |= MCFGR_SWRST;
+ sec_out32(&sec->mcfgr, mcfgr);
+
+ mcfgr |= MCFGR_DMA_RST;
+ sec_out32(&sec->mcfgr, mcfgr);
+ do {
+ mcfgr = sec_in32(&sec->mcfgr);
+ } while ((mcfgr & MCFGR_DMA_RST) == MCFGR_DMA_RST && --timeout);
+
+ if (timeout == 0)
+ return -1;
+
+ timeout = 100000;
+ do {
+ mcfgr = sec_in32(&sec->mcfgr);
+ } while ((mcfgr & MCFGR_SWRST) == MCFGR_SWRST && --timeout);
+
+ if (timeout == 0)
+ return -1;
+
+ return 0;
+}
+int sec_reset(void)
+{
+ return sec_reset_idx(0);
+}
+#ifndef CONFIG_SPL_BUILD
+static int deinstantiate_rng(u8 sec_idx, int state_handle_mask)
+{
+ u32 *desc;
+ int sh_idx, ret = 0;
+ int desc_size = ALIGN(sizeof(u32) * 2, ARCH_DMA_MINALIGN);
+
+ desc = memalign(ARCH_DMA_MINALIGN, desc_size);
+ if (!desc) {
+ debug("cannot allocate RNG init descriptor memory\n");
+ return -ENOMEM;
+ }
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, then it means the state
+ * handle was initialized by us, and thus it needs to be
+ * deinitialized as well
+ */
+
+ if (state_handle_mask & RDSTA_IF(sh_idx)) {
+ /*
+ * Create the descriptor for deinstantiating this state
+ * handle.
+ */
+ inline_cnstr_jobdesc_rng_deinstantiation(desc, sh_idx);
+ flush_dcache_range((unsigned long)desc,
+ (unsigned long)desc + desc_size);
+
+ ret = run_descriptor_jr_idx(desc, sec_idx);
+ if (ret) {
+ printf("SEC%u: RNG4 SH%d deinstantiation failed with error 0x%x\n",
+ sec_idx, sh_idx, ret);
+ ret = -EIO;
+ break;
+ }
+
+ printf("SEC%u: Deinstantiated RNG4 SH%d\n",
+ sec_idx, sh_idx);
+ }
+ }
+
+ free(desc);
+ return ret;
+}
+
+static int instantiate_rng(u8 sec_idx, int gen_sk)
+{
+ u32 *desc;
+ u32 rdsta_val;
+ int ret = 0, sh_idx, size;
+ ccsr_sec_t __iomem *sec = (ccsr_sec_t __iomem *)SEC_ADDR(sec_idx);
+ struct rng4tst __iomem *rng =
+ (struct rng4tst __iomem *)&sec->rng;
+
+ desc = memalign(ARCH_DMA_MINALIGN, sizeof(uint32_t) * 6);
+ if (!desc) {
+ printf("cannot allocate RNG init descriptor memory\n");
+ return -1;
+ }
+
+ for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+ /*
+ * If the corresponding bit is set, this state handle was
+ * initialized by somebody else; leave it alone unless it was
+ * instantiated without prediction resistance, in which case
+ * it is torn down below.
+ */
+ rdsta_val = sec_in32(&rng->rdsta);
+ if (rdsta_val & (RDSTA_IF(sh_idx))) {
+ if (rdsta_val & RDSTA_PR(sh_idx))
+ continue;
+
+ printf("SEC%u: RNG4 SH%d was instantiated w/o prediction resistance. Tearing it down\n",
+ sec_idx, sh_idx);
+
+ ret = deinstantiate_rng(sec_idx, RDSTA_IF(sh_idx));
+ if (ret)
+ break;
+ }
+
+ inline_cnstr_jobdesc_rng_instantiation(desc, sh_idx, gen_sk);
+ size = roundup(sizeof(uint32_t) * 6, ARCH_DMA_MINALIGN);
+ flush_dcache_range((unsigned long)desc,
+ (unsigned long)desc + size);
+
+ ret = run_descriptor_jr_idx(desc, sec_idx);
+
+ if (ret)
+ printf("SEC%u: RNG4 SH%d instantiation failed with error 0x%x\n",
+ sec_idx, sh_idx, ret);
+
+ rdsta_val = sec_in32(&rng->rdsta);
+ if (!(rdsta_val & RDSTA_IF(sh_idx))) {
+ free(desc);
+ return -1;
+ }
+
+ memset(desc, 0, sizeof(uint32_t) * 6);
+ }
+
+ free(desc);
+
+ return ret;
+}
+
+static u8 get_rng_vid(uint8_t sec_idx)
+{
+ ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
+ u8 vid;
+
+ if (caam_get_era() < 10) {
+ vid = (sec_in32(&sec->chavid_ls) & SEC_CHAVID_RNG_LS_MASK)
+ >> SEC_CHAVID_LS_RNG_SHIFT;
+ } else {
+ vid = (sec_in32(&sec->vreg.rng) & CHA_VER_VID_MASK)
+ >> CHA_VER_VID_SHIFT;
+ }
+
+ return vid;
+}
+
+/*
+ * By default, the TRNG runs for 200 clocks per sample;
+ * 1200 clocks per sample generates better entropy.
+ */
+static void kick_trng(int ent_delay, uint8_t sec_idx)
+{
+ ccsr_sec_t __iomem *sec = (ccsr_sec_t __iomem *)SEC_ADDR(sec_idx);
+ struct rng4tst __iomem *rng =
+ (struct rng4tst __iomem *)&sec->rng;
+ u32 val;
+
+ /* put RNG4 into program mode */
+ sec_setbits32(&rng->rtmctl, RTMCTL_PRGM);
+ /* rtsdctl bits 0-15 contain the "Entropy Delay", which defines the
+ * length (in system clocks) of each entropy sample taken
+ */
+ val = sec_in32(&rng->rtsdctl);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+ (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
+ sec_out32(&rng->rtsdctl, val);
+ /* min. freq. count, equal to 1/4 of the entropy sample length */
+ sec_out32(&rng->rtfreqmin, ent_delay >> 2);
+ /* disable maximum frequency count */
+ sec_out32(&rng->rtfreqmax, RTFRQMAX_DISABLE);
+ /*
+ * select raw sampling in both entropy shifter
+ * and statistical checker
+ */
+ sec_setbits32(&rng->rtmctl, RTMCTL_SAMP_MODE_RAW_ES_SC);
+ /* put RNG4 into run mode */
+ sec_clrbits32(&rng->rtmctl, RTMCTL_PRGM);
+}
+
+static int rng_init(uint8_t sec_idx)
+{
+ int ret, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ ccsr_sec_t __iomem *sec = (ccsr_sec_t __iomem *)SEC_ADDR(sec_idx);
+ struct rng4tst __iomem *rng =
+ (struct rng4tst __iomem *)&sec->rng;
+ u32 inst_handles;
+
+ gen_sk = !(sec_in32(&rng->rdsta) & RDSTA_SKVN);
+ do {
+ inst_handles = sec_in32(&rng->rdsta) & RDSTA_MASK;
+
+ /*
+ * If either of the state handles was instantiated by somebody
+ * else, the entropy parameters are assumed to be properly set,
+ * so the function setting them (kick_trng(...)) is skipped.
+ * Also, if a handle was instantiated, do not change
+ * the TRNG parameters.
+ */
+ if (!inst_handles) {
+ kick_trng(ent_delay, sec_idx);
+ ent_delay += 400;
+ }
+ /*
+ * If instantiate_rng(...) fails, the loop reruns
+ * and kick_trng(...) modifies the
+ * upper and lower limits of the entropy sampling
+ * interval, leading to a successful initialization of
+ * the RNG.
+ */
+ ret = instantiate_rng(sec_idx, gen_sk);
+ } while ((ret == -1) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
+ if (ret) {
+ printf("SEC%u: Failed to instantiate RNG\n", sec_idx);
+ return ret;
+ }
+
+ /* Enable RDB bit so that RNG works faster */
+ sec_setbits32(&sec->scfgr, SEC_SCFGR_RDBENABLE);
+
+ return ret;
+}
+#endif
+int sec_init_idx(uint8_t sec_idx)
+{
+ ccsr_sec_t *sec = (void *)SEC_ADDR(sec_idx);
+ uint32_t mcr = sec_in32(&sec->mcfgr);
+ int ret = 0;
+
+#ifdef CONFIG_FSL_CORENET
+ uint32_t liodnr;
+ uint32_t liodn_ns;
+ uint32_t liodn_s;
+#endif
+
+ if (!(sec_idx < CONFIG_SYS_FSL_MAX_NUM_OF_SEC)) {
+ printf("SEC%u: initialization failed\n", sec_idx);
+ return -1;
+ }
+
+ /*
+ * Modifying CAAM Read/Write Attributes
+ * For LS2080A
+ * For AXI Write - Cacheable, Write Back, Write allocate
+ * For AXI Read - Cacheable, Read allocate
+ * Only For LS2080a, to solve CAAM coherency issues
+ */
+#ifdef CONFIG_ARCH_LS2080A
+ mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0xb << MCFGR_AWCACHE_SHIFT);
+ mcr = (mcr & ~MCFGR_ARCACHE_MASK) | (0x6 << MCFGR_ARCACHE_SHIFT);
+#else
+ mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0x2 << MCFGR_AWCACHE_SHIFT);
+#endif
+
+#ifdef CONFIG_CAAM_64BIT
+ mcr |= (1 << MCFGR_PS_SHIFT);
+#endif
+ sec_out32(&sec->mcfgr, mcr);
+
+#ifdef CONFIG_FSL_CORENET
+#ifdef CONFIG_SPL_BUILD
+	/*
+	 * For an SPL build, set the LIODNs in SEC JR0 so that PAMU entries
+	 * can be created for them. For a normal build, these are set in
+	 * set_liodns().
+	 */
+ liodn_ns = CONFIG_SPL_JR0_LIODN_NS & JRNSLIODN_MASK;
+ liodn_s = CONFIG_SPL_JR0_LIODN_S & JRSLIODN_MASK;
+
+ liodnr = sec_in32(&sec->jrliodnr[0].ls) &
+ ~(JRNSLIODN_MASK | JRSLIODN_MASK);
+ liodnr = liodnr |
+ (liodn_ns << JRNSLIODN_SHIFT) |
+ (liodn_s << JRSLIODN_SHIFT);
+ sec_out32(&sec->jrliodnr[0].ls, liodnr);
+#else
+ liodnr = sec_in32(&sec->jrliodnr[0].ls);
+ liodn_ns = (liodnr & JRNSLIODN_MASK) >> JRNSLIODN_SHIFT;
+ liodn_s = (liodnr & JRSLIODN_MASK) >> JRSLIODN_SHIFT;
+#endif
+#endif
+
+ ret = jr_init(sec_idx);
+ if (ret < 0) {
+ printf("SEC%u: initialization failed\n", sec_idx);
+ return -1;
+ }
+
+#ifdef CONFIG_FSL_CORENET
+ ret = sec_config_pamu_table(liodn_ns, liodn_s);
+ if (ret < 0)
+ return -1;
+
+ pamu_enable();
+#endif
+#ifndef CONFIG_SPL_BUILD
+ if (get_rng_vid(sec_idx) >= 4) {
+ if (rng_init(sec_idx) < 0) {
+ printf("SEC%u: RNG instantiation failed\n", sec_idx);
+ return -1;
+ }
+
+ if (IS_ENABLED(CONFIG_DM_RNG)) {
+ ret = device_bind_driver(NULL, "caam-rng", "caam-rng",
+ NULL);
+ if (ret)
+ printf("Couldn't bind rng driver (%d)\n", ret);
+ }
+
+ printf("SEC%u: RNG instantiated\n", sec_idx);
+ }
+#endif
+ return ret;
+}
+
+int sec_init(void)
+{
+ return sec_init_idx(0);
+}
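
For reference, a minimal sketch of how platform code might bring up the first SEC instance through sec_init() above; the hook name board_crypto_setup() and its call site are assumptions for illustration, not part of this driver:

#include <common.h>
#include <fsl_sec.h>

/* Sketch: initialize SEC0 (job ring, RNG, optional caam-rng binding) */
int board_crypto_setup(void)
{
	int ret;

	ret = sec_init();
	if (ret)
		printf("CAAM: SEC0 initialization failed (%d)\n", ret);

	return ret;
}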
diff --git a/roms/u-boot/drivers/crypto/fsl/jr.h b/roms/u-boot/drivers/crypto/fsl/jr.h
new file mode 100644
index 000000000..1047aa772
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/jr.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2008-2014 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __JR_H
+#define __JR_H
+
+#include <linux/compiler.h>
+#include "type.h"
+
+#define JR_SIZE 4
+/* Timeout currently defined as 10 sec */
+#define CONFIG_USEC_DEQ_TIMEOUT 10000000U
+
+#define DEFAULT_JR_ID 0
+#define DEFAULT_JR_LIODN 0
+#define DEFAULT_IRQ 0 /* Interrupts not to be configured */
+
+#define MCFGR_SWRST ((uint32_t)(1)<<31) /* Software Reset */
+#define MCFGR_DMA_RST ((uint32_t)(1)<<28) /* DMA Reset */
+#define MCFGR_PS_SHIFT 16
+#define MCFGR_AWCACHE_SHIFT 8
+#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_ARCACHE_SHIFT 12
+#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT)
+
+#define JR_INTMASK 0x00000001
+#define JRCR_RESET 0x01
+#define JRINT_ERR_HALT_INPROGRESS 0x4
+#define JRINT_ERR_HALT_MASK 0xc
+#define JRNSLIODN_SHIFT 16
+#define JRNSLIODN_MASK 0x0fff0000
+#define JRSLIODN_SHIFT 0
+#define JRSLIODN_MASK 0x00000fff
+
+#define JQ_DEQ_ERR -1
+#define JQ_DEQ_TO_ERR -2
+#define JQ_ENQ_ERR -3
+
+#define RNG4_MAX_HANDLES 2
+
+struct op_ring {
+ caam_dma_addr_t desc;
+ uint32_t status;
+} __packed;
+
+struct jr_info {
+ void (*callback)(uint32_t status, void *arg);
+ caam_dma_addr_t desc_phys_addr;
+ uint32_t desc_len;
+ uint32_t op_done;
+ void *arg;
+};
+
+struct jobring {
+ int jq_id;
+ int irq;
+ int liodn;
+	/*
+	 * Head is the index at which software enqueues the next descriptor
+	 * in the input ring.
+	 */
+	int head;
+	/*
+	 * Tail is used by software while enqueuing to determine whether
+	 * there is any space left in the software-maintained input ring.
+	 * On dequeue, tail is incremented only on in-order job completion.
+	 */
+	int tail;
+	/*
+	 * Read index of the output ring. It may not match tail in case of
+	 * out-of-order completion.
+	 */
+	int read_idx;
+	/* Write index to the input ring. Always equal to head. */
+	int write_idx;
+	/* Size of the rings. */
+	int size;
+	/* Output ring size in bytes, aligned to the cache line size */
+	int op_size;
+	/*
+	 * The input and output rings are accessed by the SEC, so these
+	 * pointers must point to the housekeeping region shared with the
+	 * SEC.
+	 */
+	/* Circular ring of input descriptors */
+	caam_dma_addr_t *input_ring;
+	/*
+	 * Circular ring holding bookkeeping info for the descriptors in
+	 * the input and output rings. It does not need to be visible to
+	 * the SEC, so it can live in normal memory (e.g. on the stack).
+	 */
+	struct jr_info info[JR_SIZE];
+	/* Circular ring of output (completion) entries */
+	struct op_ring *output_ring;
+ /* Offset in CCSR to the SEC engine to which this JR belongs */
+ uint32_t sec_offset;
+
+};
+
+struct result {
+ int done;
+ uint32_t status;
+};
+
+void caam_jr_strstatus(u32 status);
+int run_descriptor_jr(uint32_t *desc);
+
+#endif
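
run_descriptor_jr() declared above is the synchronous entry point used throughout these drivers: build a descriptor, flush it for DMA (the real drivers align the flushed range to ARCH_DMA_MINALIGN), enqueue it, and poll until the job ring reports completion — see rng.c below for the pattern in use. A minimal hedged sketch, assuming the caller already has a complete descriptor of desc_bytes length:

#include <common.h>
#include <cpu_func.h>		/* flush_dcache_range() */
#include "jr.h"

/* Sketch: run one prepared CAAM descriptor synchronously */
static int run_one_job(uint32_t *desc, unsigned long desc_bytes)
{
	int ret;

	/* Make the descriptor visible to the CAAM DMA engine */
	flush_dcache_range((unsigned long)desc,
			   (unsigned long)desc + desc_bytes);

	/* Enqueue and poll; fails with JQ_*_ERR or after the deq timeout */
	ret = run_descriptor_jr(desc);
	if (ret)
		printf("CAAM job failed: %d\n", ret);

	return ret;
}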
diff --git a/roms/u-boot/drivers/crypto/fsl/rng.c b/roms/u-boot/drivers/crypto/fsl/rng.c
new file mode 100644
index 000000000..063649480
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/rng.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2020 Michael Walle <michael@walle.cc>
+ *
+ * Driver for Freescale Cryptographic Accelerator and Assurance
+ * Module (CAAM) hardware random number generator.
+ */
+
+#include <asm/cache.h>
+#include <common.h>
+#include <cpu_func.h>
+#include <dm.h>
+#include <rng.h>
+#include <linux/kernel.h>
+#include "desc_constr.h"
+#include "jobdesc.h"
+#include "jr.h"
+
+#define CAAM_RNG_MAX_FIFO_STORE_SIZE 16
+#define CAAM_RNG_DESC_LEN (3 * CAAM_CMD_SZ + CAAM_PTR_SZ)
+
+struct caam_rng_priv {
+ u32 desc[CAAM_RNG_DESC_LEN / 4];
+ u8 data[CAAM_RNG_MAX_FIFO_STORE_SIZE] __aligned(ARCH_DMA_MINALIGN);
+};
+
+static int caam_rng_read_one(struct caam_rng_priv *priv)
+{
+ int size = ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE, ARCH_DMA_MINALIGN);
+ int ret;
+
+ ret = run_descriptor_jr(priv->desc);
+ if (ret < 0)
+ return -EIO;
+
+ invalidate_dcache_range((unsigned long)priv->data,
+ (unsigned long)priv->data + size);
+
+ return 0;
+}
+
+static int caam_rng_read(struct udevice *dev, void *data, size_t len)
+{
+ struct caam_rng_priv *priv = dev_get_priv(dev);
+ u8 *buffer = data;
+ size_t size;
+ int ret;
+
+ while (len) {
+ ret = caam_rng_read_one(priv);
+ if (ret)
+ return ret;
+
+ size = min(len, (size_t)CAAM_RNG_MAX_FIFO_STORE_SIZE);
+
+ memcpy(buffer, priv->data, size);
+ buffer += size;
+ len -= size;
+ }
+
+ return 0;
+}
+
+static int caam_rng_probe(struct udevice *dev)
+{
+ struct caam_rng_priv *priv = dev_get_priv(dev);
+ ulong size = ALIGN(CAAM_RNG_DESC_LEN, ARCH_DMA_MINALIGN);
+
+ inline_cnstr_jobdesc_rng(priv->desc, priv->data,
+ CAAM_RNG_MAX_FIFO_STORE_SIZE);
+ flush_dcache_range((unsigned long)priv->desc,
+ (unsigned long)priv->desc + size);
+
+ return 0;
+}
+
+static const struct dm_rng_ops caam_rng_ops = {
+ .read = caam_rng_read,
+};
+
+U_BOOT_DRIVER(caam_rng) = {
+ .name = "caam-rng",
+ .id = UCLASS_RNG,
+ .ops = &caam_rng_ops,
+ .probe = caam_rng_probe,
+ .priv_auto = sizeof(struct caam_rng_priv),
+ .flags = DM_FLAG_ALLOC_PRIV_DMA,
+};
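
Once sec_init_idx() has bound the "caam-rng" device (see jr.c above), consumers read random bytes through the DM RNG uclass instead of calling this driver directly. A minimal sketch, assuming U-Boot's standard uclass_get_device() and dm_rng_read() helpers:

#include <common.h>
#include <dm.h>
#include <rng.h>

/* Sketch: fill buf with len bytes from the first RNG device */
static int get_caam_random(void *buf, size_t len)
{
	struct udevice *dev;
	int ret;

	/* Probes the caam-rng driver on first use */
	ret = uclass_get_device(UCLASS_RNG, 0, &dev);
	if (ret)
		return ret;

	return dm_rng_read(dev, buf, len);
}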
diff --git a/roms/u-boot/drivers/crypto/fsl/rsa_caam.h b/roms/u-boot/drivers/crypto/fsl/rsa_caam.h
new file mode 100644
index 000000000..9a6a8afa4
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/rsa_caam.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ */
+
+#ifndef __RSA_CAAM_H
+#define __RSA_CAAM_H
+
+#include <common.h>
+
+/**
+ * struct pk_in_params - inputs to the PKHA block in CAAM
+ * These parameters are required to perform modular exponentiation
+ * using the PKHA block in CAAM.
+ */
+struct pk_in_params {
+ const uint8_t *e; /* public exponent as byte array */
+ uint32_t e_siz; /* size of e[] in number of bytes */
+ const uint8_t *n; /* modulus as byte array */
+ uint32_t n_siz; /* size of n[] in number of bytes */
+ const uint8_t *a; /* Signature as byte array */
+ uint32_t a_siz; /* size of a[] in number of bytes */
+	uint8_t *b; /* Result of modular exponentiation as byte array */
+ uint32_t b_siz; /* size of b[] in number of bytes */
+};
+
+#endif
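
For context, a hedged sketch of how the RSA acceleration path (fsl_rsa.c in this series) might populate struct pk_in_params before building a PKHA descriptor; the key_prop field names follow U-Boot's rsa-mod-exp interface, and sizing the result buffer from the modulus length is an assumption here:

#include <u-boot/rsa-mod-exp.h>	/* struct key_prop */
#include "rsa_caam.h"

/* Sketch: map an RSA public key and signature onto PKHA inputs */
static void fill_pkha_params(const struct key_prop *prop,
			     const uint8_t *sig, uint32_t sig_len,
			     uint8_t *out, struct pk_in_params *pk)
{
	pk->e = prop->public_exponent;	/* public exponent */
	pk->e_siz = prop->exp_len;
	pk->n = prop->modulus;		/* modulus */
	pk->n_siz = prop->num_bits / 8;
	pk->a = sig;			/* signature: a = sig */
	pk->a_siz = sig_len;
	pk->b = out;			/* result: b = a^e mod n */
	pk->b_siz = prop->num_bits / 8;
}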
diff --git a/roms/u-boot/drivers/crypto/fsl/sec.c b/roms/u-boot/drivers/crypto/fsl/sec.c
new file mode 100644
index 000000000..f0a4a63d8
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/sec.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ */
+
+#include <common.h>
+#include <linux/libfdt.h>
+#include <fdt_support.h>
+#if CONFIG_SYS_FSL_SEC_COMPAT == 2 || CONFIG_SYS_FSL_SEC_COMPAT >= 4
+#include <fsl_sec.h>
+#endif
+
+/*
+ * update crypto node properties to a specified revision of the SEC
+ * called with sec_rev == 0 if not on an E processor
+ */
+#if CONFIG_SYS_FSL_SEC_COMPAT == 2 /* SEC 2.x/3.x */
+void fdt_fixup_crypto_node(void *blob, int sec_rev)
+{
+ static const struct sec_rev_prop {
+ u32 sec_rev;
+ u32 num_channels;
+ u32 channel_fifo_len;
+ u32 exec_units_mask;
+ u32 descriptor_types_mask;
+ } sec_rev_prop_list[] = {
+ { 0x0200, 4, 24, 0x07e, 0x01010ebf }, /* SEC 2.0 */
+ { 0x0201, 4, 24, 0x0fe, 0x012b0ebf }, /* SEC 2.1 */
+ { 0x0202, 1, 24, 0x04c, 0x0122003f }, /* SEC 2.2 */
+ { 0x0204, 4, 24, 0x07e, 0x012b0ebf }, /* SEC 2.4 */
+ { 0x0300, 4, 24, 0x9fe, 0x03ab0ebf }, /* SEC 3.0 */
+ { 0x0301, 4, 24, 0xbfe, 0x03ab0ebf }, /* SEC 3.1 */
+ { 0x0303, 4, 24, 0x97c, 0x03a30abf }, /* SEC 3.3 */
+ };
+ static char compat_strlist[ARRAY_SIZE(sec_rev_prop_list) *
+ sizeof("fsl,secX.Y")];
+ int crypto_node, sec_idx, err;
+ char *p;
+ u32 val;
+
+ /* locate crypto node based on lowest common compatible */
+ crypto_node = fdt_node_offset_by_compatible(blob, -1, "fsl,sec2.0");
+ if (crypto_node == -FDT_ERR_NOTFOUND)
+ return;
+
+ /* delete it if not on an E-processor */
+ if (crypto_node > 0 && !sec_rev) {
+ fdt_del_node(blob, crypto_node);
+ return;
+ }
+
+ /* else we got called for possible uprev */
+ for (sec_idx = 0; sec_idx < ARRAY_SIZE(sec_rev_prop_list); sec_idx++)
+ if (sec_rev_prop_list[sec_idx].sec_rev == sec_rev)
+ break;
+
+ if (sec_idx == ARRAY_SIZE(sec_rev_prop_list)) {
+ puts("warning: unknown SEC revision number\n");
+ return;
+ }
+
+ err = fdt_setprop_u32(blob, crypto_node, "fsl,num-channels",
+ sec_rev_prop_list[sec_idx].num_channels);
+ if (err < 0)
+ printf("WARNING: could not set crypto property: %s\n",
+ fdt_strerror(err));
+
+ err = fdt_setprop_u32(blob, crypto_node, "fsl,descriptor-types-mask",
+ sec_rev_prop_list[sec_idx].descriptor_types_mask);
+ if (err < 0)
+ printf("WARNING: could not set crypto property: %s\n",
+ fdt_strerror(err));
+
+ err = fdt_setprop_u32(blob, crypto_node, "fsl,exec-units-mask",
+ sec_rev_prop_list[sec_idx].exec_units_mask);
+ if (err < 0)
+ printf("WARNING: could not set crypto property: %s\n",
+ fdt_strerror(err));
+
+ err = fdt_setprop_u32(blob, crypto_node, "fsl,channel-fifo-len",
+ sec_rev_prop_list[sec_idx].channel_fifo_len);
+ if (err < 0)
+ printf("WARNING: could not set crypto property: %s\n",
+ fdt_strerror(err));
+
+ val = 0;
+ while (sec_idx >= 0) {
+ p = compat_strlist + val;
+ val += sprintf(p, "fsl,sec%d.%d",
+ (sec_rev_prop_list[sec_idx].sec_rev & 0xff00) >> 8,
+ sec_rev_prop_list[sec_idx].sec_rev & 0x00ff) + 1;
+ sec_idx--;
+ }
+ err = fdt_setprop(blob, crypto_node, "compatible", &compat_strlist,
+ val);
+ if (err < 0)
+ printf("WARNING: could not set crypto property: %s\n",
+ fdt_strerror(err));
+}
+#elif CONFIG_SYS_FSL_SEC_COMPAT >= 4 /* SEC4 */
+/**
+ * caam_get_era() - fetch the CAAM's era
+ *
+ * The SEC module provides an "era", which can be used to differentiate
+ * between different revisions.
+ *
+ * Return: era of the SEC.
+ */
+u8 caam_get_era(void)
+{
+ static const struct {
+ u16 ip_id;
+ u8 maj_rev;
+ u8 era;
+ } caam_eras[] = {
+ {0x0A10, 1, 1},
+ {0x0A10, 2, 2},
+ {0x0A12, 1, 3},
+ {0x0A14, 1, 3},
+ {0x0A14, 2, 4},
+ {0x0A16, 1, 4},
+ {0x0A10, 3, 4},
+ {0x0A11, 1, 4},
+ {0x0A18, 1, 4},
+ {0x0A11, 2, 5},
+ {0x0A12, 2, 5},
+ {0x0A13, 1, 5},
+ {0x0A1C, 1, 5}
+ };
+
+ ccsr_sec_t __iomem *sec = (void __iomem *)CONFIG_SYS_FSL_SEC_ADDR;
+ u32 secvid_ms = sec_in32(&sec->secvid_ms);
+ u32 ccbvid = sec_in32(&sec->ccbvid);
+ u16 ip_id = (secvid_ms & SEC_SECVID_MS_IPID_MASK) >>
+ SEC_SECVID_MS_IPID_SHIFT;
+ u8 maj_rev = (secvid_ms & SEC_SECVID_MS_MAJ_REV_MASK) >>
+ SEC_SECVID_MS_MAJ_REV_SHIFT;
+ u8 era = (ccbvid & SEC_CCBVID_ERA_MASK) >> SEC_CCBVID_ERA_SHIFT;
+
+ int i;
+
+ if (era) /* This is '0' prior to CAAM ERA-6 */
+ return era;
+
+ for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
+ if (caam_eras[i].ip_id == ip_id &&
+ caam_eras[i].maj_rev == maj_rev)
+ return caam_eras[i].era;
+
+ return 0;
+}
+
+static void fdt_fixup_crypto_era(void *blob, u32 era)
+{
+ int err;
+ int crypto_node;
+
+ crypto_node = fdt_path_offset(blob, "crypto");
+ if (crypto_node < 0) {
+ printf("WARNING: Missing crypto node\n");
+ return;
+ }
+
+ err = fdt_setprop_u32(blob, crypto_node, "fsl,sec-era", era);
+ if (err < 0) {
+ printf("ERROR: could not set fsl,sec-era property: %s\n",
+ fdt_strerror(err));
+ }
+}
+
+void fdt_fixup_crypto_node(void *blob, int sec_rev)
+{
+ u8 era;
+
+ if (!sec_rev) {
+ fdt_del_node_and_alias(blob, "crypto");
+ return;
+ }
+
+	/* Record the SEC era in the crypto node ("fsl,sec-era") */
+ era = caam_get_era();
+ if (era) {
+ fdt_fixup_crypto_era(blob, era);
+ } else {
+ printf("WARNING: Unable to get ERA for CAAM rev: %d\n",
+ sec_rev);
+ }
+}
+#endif
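
fdt_fixup_crypto_node() above is normally called from a board's device-tree fixup hook. A minimal sketch, assuming the standard ft_board_setup() callback; the sec_rev value shown is illustrative only and would really come from SoC/SVR detection (pass 0 on parts without the security engine so the node is deleted):

#include <common.h>
#include <fdt_support.h>

int ft_board_setup(void *blob, struct bd_info *bd)
{
	int sec_rev = 0x0301;	/* e.g. SEC 3.1 (assumed, normally probed) */

	fdt_fixup_crypto_node(blob, sec_rev);

	return 0;
}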
diff --git a/roms/u-boot/drivers/crypto/fsl/type.h b/roms/u-boot/drivers/crypto/fsl/type.h
new file mode 100644
index 000000000..b7031a60f
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/fsl/type.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 NXP
+ *
+ */
+
+#ifndef CRYPTO_FSL_TYPE_H
+#define CRYPTO_FSL_TYPE_H
+
+#ifdef CONFIG_CAAM_64BIT
+typedef unsigned long long caam_dma_addr_t;
+#else
+typedef u32 caam_dma_addr_t;
+#endif
+
+#endif
diff --git a/roms/u-boot/drivers/crypto/rsa_mod_exp/Kconfig b/roms/u-boot/drivers/crypto/rsa_mod_exp/Kconfig
new file mode 100644
index 000000000..6dcb39a8d
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/rsa_mod_exp/Kconfig
@@ -0,0 +1,5 @@
+config DM_MOD_EXP
+ bool "Enable Driver Model for RSA Modular Exponentiation"
+ depends on DM
+ help
+ If you want to use driver model for RSA Modular Exponentiation, say Y.
diff --git a/roms/u-boot/drivers/crypto/rsa_mod_exp/Makefile b/roms/u-boot/drivers/crypto/rsa_mod_exp/Makefile
new file mode 100644
index 000000000..7e829d3c1
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/rsa_mod_exp/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2014 Freescale Semiconductor, Inc.
+
+obj-$(CONFIG_RSA) += mod_exp_uclass.o
+obj-$(CONFIG_RSA_SOFTWARE_EXP) += mod_exp_sw.o
diff --git a/roms/u-boot/drivers/crypto/rsa_mod_exp/mod_exp_sw.c b/roms/u-boot/drivers/crypto/rsa_mod_exp/mod_exp_sw.c
new file mode 100644
index 000000000..7bed444c3
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/rsa_mod_exp/mod_exp_sw.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014 Freescale Semiconductor, Inc.
+ * Author: Ruchika Gupta <ruchika.gupta@freescale.com>
+ */
+
+#include <config.h>
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <u-boot/rsa-mod-exp.h>
+
+static int mod_exp_sw(struct udevice *dev, const uint8_t *sig, uint32_t sig_len,
+ struct key_prop *prop, uint8_t *out)
+{
+ int ret = 0;
+
+ ret = rsa_mod_exp_sw(sig, sig_len, prop, out);
+ if (ret) {
+ debug("%s: RSA failed to verify: %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct mod_exp_ops mod_exp_ops_sw = {
+ .mod_exp = mod_exp_sw,
+};
+
+U_BOOT_DRIVER(mod_exp_sw) = {
+ .name = "mod_exp_sw",
+ .id = UCLASS_MOD_EXP,
+ .ops = &mod_exp_ops_sw,
+ .flags = DM_FLAG_PRE_RELOC,
+};
+
+U_BOOT_DRVINFO(mod_exp_sw) = {
+ .name = "mod_exp_sw",
+};
diff --git a/roms/u-boot/drivers/crypto/rsa_mod_exp/mod_exp_uclass.c b/roms/u-boot/drivers/crypto/rsa_mod_exp/mod_exp_uclass.c
new file mode 100644
index 000000000..f5ae35e13
--- /dev/null
+++ b/roms/u-boot/drivers/crypto/rsa_mod_exp/mod_exp_uclass.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014 Freescale Semiconductor, Inc
+ * Author: Ruchika Gupta <ruchika.gupta@freescale.com>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <asm/global_data.h>
+#include <u-boot/rsa-mod-exp.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <malloc.h>
+#include <asm/io.h>
+#include <linux/list.h>
+
+#if !defined(USE_HOSTCC) && defined(CONFIG_NEEDS_MANUAL_RELOC)
+DECLARE_GLOBAL_DATA_PTR;
+#endif
+
+int rsa_mod_exp(struct udevice *dev, const uint8_t *sig, uint32_t sig_len,
+ struct key_prop *node, uint8_t *out)
+{
+ struct mod_exp_ops *ops = (struct mod_exp_ops *)device_get_ops(dev);
+
+#if !defined(USE_HOSTCC) && defined(CONFIG_NEEDS_MANUAL_RELOC)
+ static bool done;
+
+ if (!done) {
+ done = true;
+ ops->mod_exp += gd->reloc_off;
+ }
+#endif
+
+ if (!ops->mod_exp)
+ return -ENOSYS;
+
+ return ops->mod_exp(dev, sig, sig_len, node, out);
+}
+
+UCLASS_DRIVER(mod_exp) = {
+ .id = UCLASS_MOD_EXP,
+ .name = "rsa_mod_exp",
+};
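
Callers such as U-Boot's RSA verification code reach whichever mod_exp backend is bound (mod_exp_sw above, or a hardware driver like fsl_rsa) through this uclass. A minimal sketch, assuming a populated struct key_prop and the standard uclass_get_device() helper:

#include <common.h>
#include <dm.h>
#include <u-boot/rsa-mod-exp.h>

/* Sketch: compute out = sig^e mod n using the first mod_exp device */
static int do_mod_exp(const uint8_t *sig, uint32_t sig_len,
		      struct key_prop *prop, uint8_t *out)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device(UCLASS_MOD_EXP, 0, &dev);
	if (ret)
		return ret;

	return rsa_mod_exp(dev, sig, sig_len, prop, out);
}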