path: root/target/linux/layerscape/patches-5.4/804-crypto-0020-MLKU-25-3-crypto-caam-add-Secure-Memory-support.patch
author    Yangbo Lu <yangbo.lu@nxp.com>    2020-04-10 10:47:05 +0800
committer Petr Štetiar <ynezz@true.cz>    2020-05-07 12:53:06 +0200
commit    cddd4591404fb4c53dc0b3c0b15b942cdbed4356
tree      392c1179de46b0f804e3789edca19069b64e6b44
parent    d1d2c0b5579ea4f69a42246c9318539d61ba1999
layerscape: add patches-5.4
Add patches for linux-5.4. The patches are from the NXP LSDK-20.04 release, which was tagged LSDK-20.04-V5.4:
https://source.codeaurora.org/external/qoriq/qoriq-components/linux/

For boards LS1021A-IOT and Traverse-LS1043, which are not involved in LSDK, port the dts patches from 4.14.

The patches are sorted into the following categories:

301-arch-xxxx
302-dts-xxxx
303-core-xxxx
701-net-xxxx
801-audio-xxxx
802-can-xxxx
803-clock-xxxx
804-crypto-xxxx
805-display-xxxx
806-dma-xxxx
807-gpio-xxxx
808-i2c-xxxx
809-jailhouse-xxxx
810-keys-xxxx
811-kvm-xxxx
812-pcie-xxxx
813-pm-xxxx
814-qe-xxxx
815-sata-xxxx
816-sdhc-xxxx
817-spi-xxxx
818-thermal-xxxx
819-uart-xxxx
820-usb-xxxx
821-vfio-xxxx

Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Diffstat (limited to 'target/linux/layerscape/patches-5.4/804-crypto-0020-MLKU-25-3-crypto-caam-add-Secure-Memory-support.patch')
-rw-r--r--    target/linux/layerscape/patches-5.4/804-crypto-0020-MLKU-25-3-crypto-caam-add-Secure-Memory-support.patch    2503
1 file changed, 2503 insertions, 0 deletions
diff --git a/target/linux/layerscape/patches-5.4/804-crypto-0020-MLKU-25-3-crypto-caam-add-Secure-Memory-support.patch b/target/linux/layerscape/patches-5.4/804-crypto-0020-MLKU-25-3-crypto-caam-add-Secure-Memory-support.patch
new file mode 100644
index 0000000000..697f24bb07
--- /dev/null
+++ b/target/linux/layerscape/patches-5.4/804-crypto-0020-MLKU-25-3-crypto-caam-add-Secure-Memory-support.patch
@@ -0,0 +1,2503 @@
+From 32221046a302245a63d5e00d16cf3008b5b31255 Mon Sep 17 00:00:00 2001
+From: Steve Cornelius <steve.cornelius@freescale.com>
+Date: Tue, 23 Jul 2013 20:47:32 -0700
+Subject: [PATCH] MLKU-25-3 crypto: caam - add Secure Memory support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is a squash of the following i.MX BSP commits
+(rel_imx_4.19.35_1.1.0_rc2)
+
+1. ae8175a3f1be ("MLK-9710-10 Add CCM defs for FIFO_STORE instruction")
+2. 9512280d066b ("MLK-9769-11 Add SM register defs, and expanded driver-private storage.")
+3. a9dc44de8150 ("MLK-9769-10 Add Blob command bitdefs.")
+4. 8f6a17b41917 ("ENGR00289885 [iMX6Q] Add Secure Memory and SECVIO support.")
+5. c7d4f9db1077 ("MLK-9710-11 Add internal key cover and external blob export/import to prototype SM-API")
+6. 568e449edfca ("MLK-9710-12 Adapt sm_test as a black-key handling example")
+7. f42f12d9cb19 ("MLK-9710-13 Correct size in BLOB_OVERHEAD definition")
+8. 022fc2b33f57 ("MLK-9710-14 Un-pad cache sizes for blob export/import")
+9. 8d3e8c3c4dc1 ("MLK-9710-15 Correct size of padded key buffers")
+10. 997fb2ff88ec ("MLK-9710-5 Unregister Secure Memory platform device upon shutdown")
+11. 5316249198ee ("MLK-10897-1 ARM: imx7d: Add CAAM support for i.mx7d")
+12. 07566f42a4ec ("MLK-11103 Missing register in Secure memory configuration v1")
+13. 3004636304e1 ("MLK-12302 caam: Secure Memory platform device creation crashes")
+14. 0e6ed5a819f7 ("MLK-13779 crypto: caam - initialize kslock spinlock")
+15. b1254b6b5f52 ("Add missing NULL checks in CAAM sm")
+16. 61f57509bc9a ("MLK-17992: caam: sm: Fix compilation warnings")
+17. 41cf3d4c580c ("MLK-15473-1: crypto: caam: Add CAAM driver support for iMX8 soc family")
+18. bb8742481209 ("MLK-17253-1: crypto: caam: Fix computation of SM pages addresses")
+19. 308796dfae3b ("MLK-17253-2: crypto: caam: Use correct memory function for Secure Memory")
+20. ba2cb6b5fb10 ("MLK-17732-2: SM store: Support iMX8QX and iMX8QM")
+21. de710d376af6 ("MLK-17674-1: sm_store remove CONFIG_OF")
+22. cfcae647434e ("MLK-17674-2: CAAM SM : get base address from device tree")
+23. f49ebbd5eefa ("MLK-17992: caam: sm: Fix compilation warnings")
+24. 345ead4338b9 ("MLK-17841: crypto: caam: Correct bugs in Secure Memory")
+25. c17811f3fffc ("MLK-18082: crypto: caam: sm: Fix encap/decap function to handle errors")
+26. 41bcba1d4c9b ("MLK-18082: crypto: caam: sm: Fix descriptor running functions")
+27. b7385ab94784 ("MLK-20204: drivers: crypto: caam: sm: Remove deadcode")
+28. 1d749430cb63 ("MLK-20204: drivers: crypto: caam: sm: test: Dealloc keyslot properly")
+29. 6a5c2d9d358f ("crypto: caam - lower SM test verbosity")
+30. 1a6bc92c0c87 ("MLK-21617: crypto: caam - update SM test error handling")
+
+Signed-off-by: Dan Douglass <dan.douglass@nxp.com>
+Signed-off-by: Victoria Milhoan <vicki.milhoan@freescale.com>
+Signed-off-by: Steve Cornelius <steve.cornelius@nxp.com>
+Signed-off-by: Octavian Purdila <octavian.purdila@nxp.com>
+Signed-off-by: Radu Solea <radu.solea@nxp.com>
+Signed-off-by: Franck LENORMAND <franck.lenormand@nxp.com>
+Signed-off-by: Aymen Sghaier <aymen.sghaier@nxp.com>
+Signed-off-by: Silvano di Ninno <silvano.dininno@nxp.com>
+
+that have been reworked:
+
+4.
+-make SM depend on JR
+-enable SM, SECVIO only on i.MX SoCs
+-fix resource leak - add of_node_put() where needed
+
+Split commit in three:
+1 - SNVS/SECVIO driver
+2 - Secure Memory driver
+3 - DT changes
+
+11.
+Clock handling dropped - logic already upstream.
+
+17.
+Keep only Secure Memory related changes.
+Changes related to page 0 registers have been added previously.
+Other changes are dropped.
+
+21.
+Always use first jr in ctrlpriv->jr[] array to access registers
+in page 0 (aliased in jr page), irrespective of SCU presence.
+
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+---
+ drivers/crypto/caam/Kconfig | 30 +
+ drivers/crypto/caam/Makefile | 2 +
+ drivers/crypto/caam/ctrl.c | 37 ++
+ drivers/crypto/caam/desc.h | 21 +
+ drivers/crypto/caam/intern.h | 4 +
+ drivers/crypto/caam/regs.h | 158 ++++-
+ drivers/crypto/caam/sm.h | 127 ++++
+ drivers/crypto/caam/sm_store.c | 1332 ++++++++++++++++++++++++++++++++++++++++
+ drivers/crypto/caam/sm_test.c | 571 +++++++++++++++++
+ 9 files changed, 2279 insertions(+), 3 deletions(-)
+ create mode 100644 drivers/crypto/caam/sm.h
+ create mode 100644 drivers/crypto/caam/sm_store.c
+ create mode 100644 drivers/crypto/caam/sm_test.c
+
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -155,6 +155,36 @@ config CRYPTO_DEV_FSL_CAAM_RNG_TEST
+ caam RNG. This test is several minutes long and executes
+ just before the RNG is registered with the hw_random API.
+
++config CRYPTO_DEV_FSL_CAAM_SM
++ tristate "CAAM Secure Memory / Keystore API (EXPERIMENTAL)"
++ help
++ Enables use of a prototype kernel-level Keystore API with CAAM
++ Secure Memory for insertion/extraction of bus-protected secrets.
++
++config CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE
++ int "Size of each keystore slot in Secure Memory"
++ depends on CRYPTO_DEV_FSL_CAAM_SM
++ range 5 9
++ default 7
++ help
++ Select size of allocation units to divide Secure Memory pages into
++ (the size of a "slot" as referenced inside the API code).
++ Established as powers of two.
++ Examples:
++ 5 => 32 bytes
++ 6 => 64 bytes
++ 7 => 128 bytes
++ 8 => 256 bytes
++ 9 => 512 bytes
++
++config CRYPTO_DEV_FSL_CAAM_SM_TEST
++ tristate "CAAM Secure Memory - Keystore Test/Example (EXPERIMENTAL)"
++ depends on CRYPTO_DEV_FSL_CAAM_SM
++ help
++ Example thread to exercise the Keystore API and to verify that
++ stored and recovered secrets can be used for general purpose
++ encryption/decryption.
++
+ config CRYPTO_DEV_FSL_CAAM_SECVIO
+ tristate "CAAM/SNVS Security Violation Handler (EXPERIMENTAL)"
+ help
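
The CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE value added above is consumed as a power-of-two shift when the driver carves each Secure Memory page into keystore slots. A minimal sketch of the resulting geometry, mirroring what caam_sm_startup()/kso_init_data() compute further down (the 4096-byte page size and SLOTSIZE=7 are assumptions for illustration; the real page size is read from the SMVID register):

	/* Sketch only: Kconfig slot-size exponent -> keystore geometry.
	 * SLOTSIZE=7 and a 4096-byte page are example assumptions. */
	u32 page_size  = 4096;
	u32 slot_size  = 1 << CONFIG_CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE;	/* 7 -> 128 bytes */
	u32 slot_count = page_size / slot_size;				/* 4096 / 128 = 32 slots */
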
+--- a/drivers/crypto/caam/Makefile
++++ b/drivers/crypto/caam/Makefile
+@@ -21,6 +21,8 @@ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRY
+ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SM) += sm_store.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST) += sm_test.o
+ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO) += secvio.o
+
+ caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -17,6 +17,7 @@
+ #include "jr.h"
+ #include "desc_constr.h"
+ #include "ctrl.h"
++#include "sm.h"
+
+ bool caam_dpaa2;
+ EXPORT_SYMBOL(caam_dpaa2);
+@@ -573,6 +574,7 @@ static int caam_probe(struct platform_de
+ const struct soc_device_attribute *imx_soc_match;
+ struct device *dev;
+ struct device_node *nprop, *np;
++ struct resource res_regs;
+ struct caam_ctrl __iomem *ctrl;
+ struct caam_drv_private *ctrlpriv;
+ struct caam_perfmon __iomem *perfmon;
+@@ -719,9 +721,44 @@ iomap_ctrl:
+ BLOCK_OFFSET * DECO_BLOCK_NUMBER
+ );
+
++ /* Only i.MX SoCs have sm */
++ if (!imx_soc_match)
++ goto mc_fw;
++
++ /* Get CAAM-SM node and of_iomap() and save */
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-sm");
++ if (!np)
++ return -ENODEV;
++
++ /* Get CAAM SM registers base address from device tree */
++ ret = of_address_to_resource(np, 0, &res_regs);
++ if (ret) {
++ dev_err(dev, "failed to retrieve registers base from device tree\n");
++ of_node_put(np);
++ return -ENODEV;
++ }
++
++ ctrlpriv->sm_phy = res_regs.start;
++ ctrlpriv->sm_base = devm_ioremap_resource(dev, &res_regs);
++ if (IS_ERR(ctrlpriv->sm_base)) {
++ of_node_put(np);
++ return PTR_ERR(ctrlpriv->sm_base);
++ }
++
++ if (!of_machine_is_compatible("fsl,imx8mn") &&
++ !of_machine_is_compatible("fsl,imx8mm") &&
++ !of_machine_is_compatible("fsl,imx8mq") &&
++ !of_machine_is_compatible("fsl,imx8qm") &&
++ !of_machine_is_compatible("fsl,imx8qxp"))
++ ctrlpriv->sm_size = resource_size(&res_regs);
++ else
++ ctrlpriv->sm_size = PG_SIZE_64K;
++ of_node_put(np);
++
+ if (!reg_access)
+ goto set_dma_mask;
+
++mc_fw:
+ /*
+ * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
+ * long pointers in master configuration register.
+--- a/drivers/crypto/caam/desc.h
++++ b/drivers/crypto/caam/desc.h
+@@ -403,6 +403,10 @@
+ #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
++#define FIFOST_TYPE_AF_SBOX_CCM_JKEK (0x10 << FIFOST_TYPE_SHIFT)
++#define FIFOST_TYPE_AF_SBOX_CCM_TKEK (0x11 << FIFOST_TYPE_SHIFT)
++#define FIFOST_TYPE_KEY_CCM_JKEK (0x14 << FIFOST_TYPE_SHIFT)
++#define FIFOST_TYPE_KEY_CCM_TKEK (0x15 << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+@@ -1136,6 +1140,23 @@
+ #define OP_PCL_PKPROT_ECC 0x0002
+ #define OP_PCL_PKPROT_F2M 0x0001
+
++/* Blob protocol protinfo bits */
++#define OP_PCL_BLOB_TK 0x0200
++#define OP_PCL_BLOB_EKT 0x0100
++
++#define OP_PCL_BLOB_K2KR_MEM 0x0000
++#define OP_PCL_BLOB_K2KR_C1KR 0x0010
++#define OP_PCL_BLOB_K2KR_C2KR 0x0030
++#define OP_PCL_BLOB_K2KR_AFHAS 0x0050
++#define OP_PCL_BLOB_K2KR_C2KR_SPLIT 0x0070
++
++#define OP_PCL_BLOB_PTXT_SECMEM 0x0008
++#define OP_PCL_BLOB_BLACK 0x0004
++
++#define OP_PCL_BLOB_FMT_NORMAL 0x0000
++#define OP_PCL_BLOB_FMT_MSTR 0x0002
++#define OP_PCL_BLOB_FMT_TEST 0x0003
++
+ /* For non-protocol/alg-only op commands */
+ #define OP_ALG_TYPE_SHIFT 24
+ #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
+--- a/drivers/crypto/caam/intern.h
++++ b/drivers/crypto/caam/intern.h
+@@ -66,6 +66,7 @@ struct caam_drv_private_jr {
+ * Driver-private storage for a single CAAM block instance
+ */
+ struct caam_drv_private {
++ struct device *smdev;
+
+ /* Physical-presence section */
+ struct caam_ctrl __iomem *ctrl; /* controller region */
+@@ -73,6 +74,9 @@ struct caam_drv_private {
+ struct caam_assurance __iomem *assure;
+ struct caam_queue_if __iomem *qi; /* QI control region */
+ struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
++ dma_addr_t __iomem *sm_base; /* Secure memory storage base */
++ phys_addr_t sm_phy; /* Secure memory storage physical */
++ u32 sm_size;
+
+ struct iommu_domain *domain;
+
+--- a/drivers/crypto/caam/regs.h
++++ b/drivers/crypto/caam/regs.h
+@@ -382,6 +382,12 @@ struct version_regs {
+ #define CHA_VER_VID_MD_LP512 0x1ull
+ #define CHA_VER_VID_MD_HP 0x2ull
+
++/*
++ * caam_perfmon - Performance Monitor/Secure Memory Status/
++ * CAAM Global Status/Component Version IDs
++ *
++ * Spans f00-fff wherever instantiated
++ */
+ struct sec_vid {
+ u16 ip_id;
+ u8 maj_rev;
+@@ -412,17 +418,22 @@ struct caam_perfmon {
+ #define CTPR_MS_PG_SZ_SHIFT 4
+ u32 comp_parms_ms; /* CTPR - Compile Parameters Register */
+ u32 comp_parms_ls; /* CTPR - Compile Parameters Register */
+- u64 rsvd1[2];
++ /* Secure Memory State Visibility */
++ u32 rsvd1;
++ u32 smstatus; /* Secure memory status */
++ u32 rsvd2;
++ u32 smpartown; /* Secure memory partition owner */
+
+ /* CAAM Global Status fc0-fdf */
+ u64 faultaddr; /* FAR - Fault Address */
+ u32 faultliodn; /* FALR - Fault Address LIODN */
+ u32 faultdetail; /* FADR - Fault Addr Detail */
+- u32 rsvd2;
+ #define CSTA_PLEND BIT(10)
+ #define CSTA_ALT_PLEND BIT(18)
++ u32 rsvd3;
+ u32 status; /* CSTA - CAAM Status */
+- u64 rsvd3;
++ u32 smpart; /* Secure Memory Partition Parameters */
++ u32 smvid; /* Secure Memory Version ID */
+
+ /* Component Instantiation Parameters fe0-fff */
+ u32 rtic_id; /* RVID - RTIC Version ID */
+@@ -441,6 +452,62 @@ struct caam_perfmon {
+ u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */
+ };
+
++#define SMSTATUS_PART_SHIFT 28
++#define SMSTATUS_PART_MASK (0xf << SMSTATUS_PART_SHIFT)
++#define SMSTATUS_PAGE_SHIFT 16
++#define SMSTATUS_PAGE_MASK (0x7ff << SMSTATUS_PAGE_SHIFT)
++#define SMSTATUS_MID_SHIFT 8
++#define SMSTATUS_MID_MASK (0x3f << SMSTATUS_MID_SHIFT)
++#define SMSTATUS_ACCERR_SHIFT 4
++#define SMSTATUS_ACCERR_MASK (0xf << SMSTATUS_ACCERR_SHIFT)
++#define SMSTATUS_ACCERR_NONE 0
++#define SMSTATUS_ACCERR_ALLOC 1 /* Page not allocated */
++#define SMSTATUS_ACCESS_ID 2 /* Not granted by ID */
++#define SMSTATUS_ACCESS_WRITE 3 /* Writes not allowed */
++#define SMSTATUS_ACCESS_READ 4 /* Reads not allowed */
++#define SMSTATUS_ACCESS_NONKEY 6 /* Non-key reads not allowed */
++#define SMSTATUS_ACCESS_BLOB 9 /* Blob access not allowed */
++#define SMSTATUS_ACCESS_DESCB 10 /* Descriptor Blob access spans pages */
++#define SMSTATUS_ACCESS_NON_SM 11 /* Outside Secure Memory range */
++#define SMSTATUS_ACCESS_XPAGE 12 /* Access crosses pages */
++#define SMSTATUS_ACCESS_INITPG 13 /* Page still initializing */
++#define SMSTATUS_STATE_SHIFT 0
++#define SMSTATUS_STATE_MASK (0xf << SMSTATUS_STATE_SHIFT)
++#define SMSTATUS_STATE_RESET 0
++#define SMSTATUS_STATE_INIT 1
++#define SMSTATUS_STATE_NORMAL 2
++#define SMSTATUS_STATE_FAIL 3
++
++/* up to 15 rings, 2 bits shifted by ring number */
++#define SMPARTOWN_RING_SHIFT 2
++#define SMPARTOWN_RING_MASK 3
++#define SMPARTOWN_AVAILABLE 0
++#define SMPARTOWN_NOEXIST 1
++#define SMPARTOWN_UNAVAILABLE 2
++#define SMPARTOWN_OURS 3
++
++/* Maximum number of pages possible */
++#define SMPART_MAX_NUMPG_SHIFT 16
++#define SMPART_MAX_NUMPG_MASK (0x3f << SMPART_MAX_NUMPG_SHIFT)
++
++/* Maximum partition number */
++#define SMPART_MAX_PNUM_SHIFT 12
++#define SMPART_MAX_PNUM_MASK (0xf << SMPART_MAX_PNUM_SHIFT)
++
++/* Highest possible page number */
++#define SMPART_MAX_PG_SHIFT 0
++#define SMPART_MAX_PG_MASK (0x3f << SMPART_MAX_PG_SHIFT)
++
++/* Max size of a page */
++#define SMVID_PG_SIZE_SHIFT 16
++#define SMVID_PG_SIZE_MASK (0x7 << SMVID_PG_SIZE_SHIFT)
++
++/* Major/Minor Version ID */
++#define SMVID_MAJ_VERS_SHIFT 8
++#define SMVID_MAJ_VERS (0xf << SMVID_MAJ_VERS_SHIFT)
++#define SMVID_MIN_VERS_SHIFT 0
++#define SMVID_MIN_VERS (0xf << SMVID_MIN_VERS_SHIFT)
++
+ /* LIODN programming for DMA configuration */
+ #define MSTRID_LOCK_LIODN 0x80000000
+ #define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */
+@@ -645,6 +712,35 @@ struct caam_ctrl {
+ #define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */
+ #define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */
+
++/* Secure Memory Configuration - if you have it */
++/* Secure Memory Register Offset from JR Base Reg*/
++#define SM_V1_OFFSET 0x0f4
++#define SM_V2_OFFSET 0xa00
++
++/* Minimum SM Version ID requiring v2 SM register mapping */
++#define SMVID_V2 0x20105
++
++struct caam_secure_mem_v1 {
++ u32 sm_cmd; /* SMCJRx - Secure memory command */
++ u32 rsvd1;
++ u32 sm_status; /* SMCSJRx - Secure memory status */
++ u32 rsvd2;
++
++ u32 sm_perm; /* SMAPJRx - Secure memory access perms */
++ u32 sm_group2; /* SMAP2JRx - Secure memory access group 2 */
++ u32 sm_group1; /* SMAP1JRx - Secure memory access group 1 */
++};
++
++struct caam_secure_mem_v2 {
++ u32 sm_perm; /* SMAPJRx - Secure memory access perms */
++ u32 sm_group2; /* SMAP2JRx - Secure memory access group 2 */
++ u32 sm_group1; /* SMAP1JRx - Secure memory access group 1 */
++ u32 rsvd1[118];
++ u32 sm_cmd; /* SMCJRx - Secure memory command */
++ u32 rsvd2;
++ u32 sm_status; /* SMCSJRx - Secure memory status */
++};
++
+ /*
+ * caam_job_ring - direct job ring setup
+ * 1-4 possible per instantiation, base + 1000/2000/3000/4000
+@@ -815,6 +911,62 @@ struct caam_job_ring {
+
+ #define JRCR_RESET 0x01
+
++/* secure memory command */
++#define SMC_PAGE_SHIFT 16
++#define SMC_PAGE_MASK (0xffff << SMC_PAGE_SHIFT)
++#define SMC_PART_SHIFT 8
++#define SMC_PART_MASK (0x0f << SMC_PART_SHIFT)
++#define SMC_CMD_SHIFT 0
++#define SMC_CMD_MASK (0x0f << SMC_CMD_SHIFT)
++
++#define SMC_CMD_ALLOC_PAGE 0x01 /* allocate page to this partition */
++#define SMC_CMD_DEALLOC_PAGE 0x02 /* deallocate page from partition */
++#define SMC_CMD_DEALLOC_PART 0x03 /* deallocate partition */
++#define SMC_CMD_PAGE_INQUIRY 0x05 /* find partition associated with page */
++
++/* secure memory (command) status */
++#define SMCS_PAGE_SHIFT 16
++#define SMCS_PAGE_MASK (0x0fff << SMCS_PAGE_SHIFT)
++#define SMCS_CMDERR_SHIFT 14
++#define SMCS_CMDERR_MASK (3 << SMCS_CMDERR_SHIFT)
++#define SMCS_ALCERR_SHIFT 12
++#define SMCS_ALCERR_MASK (3 << SMCS_ALCERR_SHIFT)
++#define SMCS_PGOWN_SHIFT 6
++#define SMCS_PGWON_MASK (3 << SMCS_PGOWN_SHIFT)
++#define SMCS_PART_SHIFT 0
++#define SMCS_PART_MASK (0xf << SMCS_PART_SHIFT)
++
++#define SMCS_CMDERR_NONE 0
++#define SMCS_CMDERR_INCOMP 1 /* Command not yet complete */
++#define SMCS_CMDERR_SECFAIL 2 /* Security failure occurred */
++#define SMCS_CMDERR_OVERFLOW 3 /* Command overflow */
++
++#define SMCS_ALCERR_NONE 0
++#define SMCS_ALCERR_PSPERR 1 /* Partition marked PSP (dealloc only) */
++#define SMCS_ALCERR_PAGEAVAIL 2 /* Page not available */
++#define SMCS_ALCERR_PARTOWN 3 /* Partition ownership error */
++
++#define SMCS_PGOWN_AVAIL 0 /* Page is available */
++#define SMCS_PGOWN_NOEXIST 1 /* Page initializing or nonexistent */
++#define SMCS_PGOWN_NOOWN 2 /* Page owned by another processor */
++#define SMCS_PGOWN_OWNED 3 /* Page belongs to this processor */
++
++/* secure memory access permissions */
++#define SMCS_PERM_KEYMOD_SHIFT 16
++#define SMCA_PERM_KEYMOD_MASK (0xff << SMCS_PERM_KEYMOD_SHIFT)
++#define SMCA_PERM_CSP_ZERO 0x8000 /* Zero when deallocated or released */
++#define SMCA_PERM_PSP_LOCK 0x4000 /* Part./pages can't be deallocated */
++#define SMCA_PERM_PERM_LOCK 0x2000 /* Lock permissions */
++#define SMCA_PERM_GRP_LOCK 0x1000 /* Lock access groups */
++#define SMCA_PERM_RINGID_SHIFT 10
++#define SMCA_PERM_RINGID_MASK (3 << SMCA_PERM_RINGID_SHIFT)
++#define SMCA_PERM_G2_BLOB 0x0080 /* Group 2 blob import/export */
++#define SMCA_PERM_G2_WRITE 0x0020 /* Group 2 write */
++#define SMCA_PERM_G2_READ 0x0010 /* Group 2 read */
++#define SMCA_PERM_G1_BLOB 0x0008 /* Group 1... */
++#define SMCA_PERM_G1_WRITE 0x0002
++#define SMCA_PERM_G1_READ 0x0001
++
+ /*
+ * caam_assurance - Assurance Controller View
+ * base + 0x6000 padded out to 0x1000
+--- /dev/null
++++ b/drivers/crypto/caam/sm.h
+@@ -0,0 +1,127 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
++/*
++ * CAAM Secure Memory/Keywrap API Definitions
++ *
++ * Copyright 2008-2015 Freescale Semiconductor, Inc.
++ * Copyright 2016-2019 NXP
++ */
++
++#ifndef SM_H
++#define SM_H
++
++
++/* Storage access permissions */
++#define SM_PERM_READ 0x01
++#define SM_PERM_WRITE 0x02
++#define SM_PERM_BLOB 0x03
++
++/* Define treatment of secure memory vs. general memory blobs */
++#define SM_SECMEM 0
++#define SM_GENMEM 1
++
++/* Define treatment of red/black keys */
++#define RED_KEY 0
++#define BLACK_KEY 1
++
++/* Define key encryption/covering options */
++#define KEY_COVER_ECB 0 /* cover key in AES-ECB */
++#define KEY_COVER_CCM 1 /* cover key with AES-CCM */
++
++/*
++ * Round a key size up to an AES blocksize boundary so to allow for
++ * padding out to a full block
++ */
++#define AES_BLOCK_PAD(x) ((x % 16) ? ((x >> 4) + 1) << 4 : x)
++
++/* Define space required for BKEK + MAC tag storage in any blob */
++#define BLOB_OVERHEAD (32 + 16)
++
++/* Keystore maintenance functions */
++void sm_init_keystore(struct device *dev);
++u32 sm_detect_keystore_units(struct device *dev);
++int sm_establish_keystore(struct device *dev, u32 unit);
++void sm_release_keystore(struct device *dev, u32 unit);
++void caam_sm_shutdown(struct platform_device *pdev);
++int caam_sm_example_init(struct platform_device *pdev);
++
++/* Keystore accessor functions */
++extern int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size,
++ u32 *slot);
++extern int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot);
++extern int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
++ const u8 *key_data, u32 key_length);
++extern int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
++ u32 key_length, u8 *key_data);
++extern int sm_keystore_cover_key(struct device *dev, u32 unit, u32 slot,
++ u16 key_length, u8 keyauth);
++extern int sm_keystore_slot_export(struct device *dev, u32 unit, u32 slot,
++ u8 keycolor, u8 keyauth, u8 *outbuf,
++ u16 keylen, u8 *keymod);
++extern int sm_keystore_slot_import(struct device *dev, u32 unit, u32 slot,
++ u8 keycolor, u8 keyauth, u8 *inbuf,
++ u16 keylen, u8 *keymod);
++
++/* Prior functions from legacy API, deprecated */
++extern int sm_keystore_slot_encapsulate(struct device *dev, u32 unit,
++ u32 inslot, u32 outslot, u16 secretlen,
++ u8 *keymod, u16 keymodlen);
++extern int sm_keystore_slot_decapsulate(struct device *dev, u32 unit,
++ u32 inslot, u32 outslot, u16 secretlen,
++ u8 *keymod, u16 keymodlen);
++
++/* Data structure to hold per-slot information */
++struct keystore_data_slot_info {
++ u8 allocated; /* Track slot assignments */
++ u32 key_length; /* Size of the key */
++};
++
++/* Data structure to hold keystore information */
++struct keystore_data {
++ void *base_address; /* Virtual base of secure memory pages */
++ void *phys_address; /* Physical base of secure memory pages */
++ u32 slot_count; /* Number of slots in the keystore */
++ struct keystore_data_slot_info *slot; /* Per-slot information */
++};
++
++/* store the detected attributes of a secure memory page */
++struct sm_page_descriptor {
++ u16 phys_pagenum; /* may be discontiguous */
++ u16 own_part; /* Owning partition */
++ void *pg_base; /* Calculated virtual address */
++ void *pg_phys; /* Calculated physical address */
++ struct keystore_data *ksdata;
++};
++
++struct caam_drv_private_sm {
++ struct device *parentdev; /* this ends up as the controller */
++ struct device *smringdev; /* ring that owns this instance */
++ struct platform_device *sm_pdev; /* Secure Memory platform device */
++ spinlock_t kslock ____cacheline_aligned;
++
++ /* SM Register offset from JR base address */
++ u32 sm_reg_offset;
++
++ /* Default parameters for geometry */
++ u32 max_pages; /* maximum pages this instance can support */
++ u32 top_partition; /* highest partition number in this instance */
++ u32 top_page; /* highest page number in this instance */
++ u32 page_size; /* page size */
++ u32 slot_size; /* selected size of each storage block */
++
++ /* Partition/Page Allocation Map */
++ u32 localpages; /* Number of pages we can access */
++ struct sm_page_descriptor *pagedesc; /* Allocated per-page */
++
++ /* Installed handlers for keystore access */
++ int (*data_init)(struct device *dev, u32 unit);
++ void (*data_cleanup)(struct device *dev, u32 unit);
++ int (*slot_alloc)(struct device *dev, u32 unit, u32 size, u32 *slot);
++ int (*slot_dealloc)(struct device *dev, u32 unit, u32 slot);
++ void *(*slot_get_address)(struct device *dev, u32 unit, u32 handle);
++ void *(*slot_get_physical)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_base)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_offset)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_slot_size)(struct device *dev, u32 unit, u32 handle);
++};
++
++#endif /* SM_H */
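
Taken together, the accessors declared above follow a detect / establish / alloc / load / cover / export sequence (sm_test.c exercises a similar flow). The sketch below is illustrative only; unit 0, the 32-byte key length, the 8-byte key modifier and the error handling are assumptions, not part of the patch:

	/* Illustrative keystore usage sketch. Assumes one detected unit (unit 0),
	 * a 32-byte clear key in key[], an 8-byte keymod[] and an outbuf of at
	 * least 32 + BLOB_OVERHEAD bytes. */
	static int sm_usage_sketch(struct device *ksdev, const u8 *key,
				   u8 *keymod, u8 *outbuf)
	{
		u32 unit = 0, slot;
		int ret;

		sm_init_keystore(ksdev);		/* install default handlers */
		if (!sm_detect_keystore_units(ksdev))
			return -ENODEV;

		ret = sm_establish_keystore(ksdev, unit);	/* set up slot bookkeeping */
		if (ret)
			return ret;

		ret = sm_keystore_slot_alloc(ksdev, unit, 32, &slot);
		if (ret)
			goto release;

		/* place the clear key, cover it in place, then export it as a blob */
		ret = sm_keystore_slot_load(ksdev, unit, slot, key, 32);
		if (!ret)
			ret = sm_keystore_cover_key(ksdev, unit, slot, 32, KEY_COVER_ECB);
		if (!ret)
			ret = sm_keystore_slot_export(ksdev, unit, slot, BLACK_KEY,
						      KEY_COVER_ECB, outbuf, 32, keymod);

		sm_keystore_slot_dealloc(ksdev, unit, slot);
	release:
		sm_release_keystore(ksdev, unit);
		return ret;
	}
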
+--- /dev/null
++++ b/drivers/crypto/caam/sm_store.c
+@@ -0,0 +1,1332 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * CAAM Secure Memory Storage Interface
++ *
++ * Copyright 2008-2015 Freescale Semiconductor, Inc.
++ * Copyright 2016-2019 NXP
++ *
++ * Loosely based on the SHW Keystore API for SCC/SCC2
++ * Experimental implementation and NOT intended for upstream use. Expect
++ * this interface to be amended significantly in the future once it becomes
++ * integrated into live applications.
++ *
++ * Known issues:
++ *
++ * - Executes one instance of a secure memory "driver". This is tied to the
++ * fact that job rings can't run as standalone instances in the present
++ * configuration.
++ *
++ * - It does not expose a userspace interface. The value of a userspace
++ * interface for access to secrets is a point for further architectural
++ * discussion.
++ *
++ * - Partition/permission management is not part of this interface. It
++ * depends on some level of "knowledge" agreed upon between bootloader,
++ * provisioning applications, and OS-hosted software (which uses this
++ * driver).
++ *
++ * - No means of identifying the location or purpose of secrets managed by
++ * this interface exists; "slot location" and format of a given secret
++ * needs to be agreed upon between bootloader, provisioner, and OS-hosted
++ * application.
++ */
++
++#include "compat.h"
++#include "regs.h"
++#include "jr.h"
++#include "desc.h"
++#include "intern.h"
++#include "error.h"
++#include "sm.h"
++#include <linux/of_address.h>
++
++#define SECMEM_KEYMOD_LEN 8
++#define GENMEM_KEYMOD_LEN 16
++
++#ifdef SM_DEBUG_CONT
++void sm_show_page(struct device *dev, struct sm_page_descriptor *pgdesc)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ u32 i, *smdata;
++
++ dev_info(dev, "physical page %d content at 0x%08x\n",
++ pgdesc->phys_pagenum, pgdesc->pg_base);
++ smdata = pgdesc->pg_base;
++ for (i = 0; i < (smpriv->page_size / sizeof(u32)); i += 4)
++ dev_info(dev, "[0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
++ (u32)&smdata[i], smdata[i], smdata[i+1], smdata[i+2],
++ smdata[i+3]);
++}
++#endif
++
++#define INITIAL_DESCSZ 16 /* size of tmp buffer for descriptor const. */
++
++static __always_inline u32 sm_send_cmd(struct caam_drv_private_sm *smpriv,
++ struct caam_drv_private_jr *jrpriv,
++ u32 cmd, u32 *status)
++{
++ void __iomem *write_address;
++ void __iomem *read_address;
++
++ if (smpriv->sm_reg_offset == SM_V1_OFFSET) {
++ struct caam_secure_mem_v1 *sm_regs_v1;
++
++ sm_regs_v1 = (struct caam_secure_mem_v1 *)
++ ((void *)jrpriv->rregs + SM_V1_OFFSET);
++ write_address = &sm_regs_v1->sm_cmd;
++ read_address = &sm_regs_v1->sm_status;
++
++ } else if (smpriv->sm_reg_offset == SM_V2_OFFSET) {
++ struct caam_secure_mem_v2 *sm_regs_v2;
++
++ sm_regs_v2 = (struct caam_secure_mem_v2 *)
++ ((void *)jrpriv->rregs + SM_V2_OFFSET);
++ write_address = &sm_regs_v2->sm_cmd;
++ read_address = &sm_regs_v2->sm_status;
++
++ } else {
++ return -EINVAL;
++ }
++
++ wr_reg32(write_address, cmd);
++
++ udelay(10);
++
++ /* Read until the command has terminated and the status is correct */
++ do {
++ *status = rd_reg32(read_address);
++ } while (((*status & SMCS_CMDERR_MASK) >> SMCS_CMDERR_SHIFT)
++ == SMCS_CMDERR_INCOMP);
++
++ return 0;
++}
++
++/*
++ * Construct a black key conversion job descriptor
++ *
++ * This function constructs a job descriptor capable of performing
++ * a key blackening operation on a plaintext secure memory resident object.
++ *
++ * - desc pointer to a pointer to the descriptor generated by this
++ * function. Caller will be responsible to kfree() this
++ * descriptor after execution.
++ * - key physical pointer to the plaintext, which will also hold
++ * the result. Since encryption occurs in place, caller must
++ * ensure that the space is large enough to accommodate the
++ * blackened key
++ * - keysz size of the plaintext
++ * - auth if a CCM-covered key is required, use KEY_COVER_CCM, else
++ * use KEY_COVER_ECB.
++ *
++ * KEY to key1 from @key_addr LENGTH 16 BYTES;
++ * FIFO STORE from key1[ecb] TO @key_addr LENGTH 16 BYTES;
++ *
++ * Note that this variant uses the JDKEK only; it does not accommodate the
++ * trusted key encryption key at this time.
++ *
++ */
++static int blacken_key_jobdesc(u32 **desc, void *key, u16 keysz, bool auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
++
++ /* Load key to class 1 key register */
++ tmpdesc[idx++] = CMD_KEY | CLASS_1 | (keysz & KEY_LENGTH_MASK);
++ tmpdesc[idx++] = (uintptr_t)key;
++
++ /* ...and write back out via FIFO store*/
++ tmpdesc[idx] = CMD_FIFO_STORE | CLASS_1 | (keysz & KEY_LENGTH_MASK);
++
++ /* plus account for ECB/CCM option in FIFO_STORE */
++ if (auth == KEY_COVER_ECB)
++ tmpdesc[idx] |= FIFOST_TYPE_KEY_KEK;
++ else
++ tmpdesc[idx] |= FIFOST_TYPE_KEY_CCM_JKEK;
++
++ idx++;
++ tmpdesc[idx++] = (uintptr_t)key;
++
++ /* finish off the job header */
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ /* now allocate execution buffer and coat it with executable */
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++
++ return dsize;
++}
++
++/*
++ * Construct a blob encapsulation job descriptor
++ *
++ * This function dynamically constructs a blob encapsulation job descriptor
++ * from the following arguments:
++ *
++ * - desc pointer to a pointer to the descriptor generated by this
++ * function. Caller will be responsible to kfree() this
++ * descriptor after execution.
++ * - keymod Physical pointer to a key modifier, which must reside in a
++ * contiguous piece of memory. Modifier will be assumed to be
++ * 8 bytes long for a blob of type SM_SECMEM, or 16 bytes long
++ * for a blob of type SM_GENMEM (see blobtype argument).
++ * - secretbuf Physical pointer to a secret, normally a black or red key,
++ * possibly residing within an accessible secure memory page,
++ * of the secret to be encapsulated to an output blob.
++ * - outbuf Physical pointer to the destination buffer to receive the
++ * encapsulated output. This buffer will need to be 48 bytes
++ * larger than the input because of the added encapsulation data.
++ * The generated descriptor will account for the increase in size,
++ * but the caller must also account for this increase in the
++ * buffer allocator.
++ * - secretsz Size of input secret, in bytes. This is limited to 65536
++ * less the size of blob overhead, since the length embeds into
++ * DECO pointer in/out instructions.
++ * - keycolor Determines if the source data is covered (black key) or
++ * plaintext (red key). RED_KEY or BLACK_KEY are defined in
++ * for this purpose.
++ * - blobtype Determine if encapsulated blob should be a secure memory
++ * blob (SM_SECMEM), with partition data embedded with key
++ * material, or a general memory blob (SM_GENMEM).
++ * - auth If BLACK_KEY source is covered via AES-CCM, specify
++ * KEY_COVER_CCM, else uses AES-ECB (KEY_COVER_ECB).
++ *
++ * Upon completion, desc points to a buffer containing a CAAM job
++ * descriptor which encapsulates data into an externally-storable blob
++ * suitable for use across power cycles.
++ *
++ * This is an example of a black key encapsulation job into a general memory
++ * blob. Notice the 16-byte key modifier in the LOAD instruction. Also note
++ * the output 48 bytes longer than the input:
++ *
++ * [00] B0800008 jobhdr: stidx=0 len=8
++ * [01] 14400010 ld: ccb2-key len=16 offs=0
++ * [02] 08144891 ptr->@0x08144891
++ * [03] F800003A seqoutptr: len=58
++ * [04] 01000000 out_ptr->@0x01000000
++ * [05] F000000A seqinptr: len=10
++ * [06] 09745090 in_ptr->@0x09745090
++ * [07] 870D0004 operation: encap blob reg=memory, black, format=normal
++ *
++ * This is an example of a red key encapsulation job for storing a red key
++ * into a secure memory blob. Note the 8 byte modifier on the 12 byte offset
++ * in the LOAD instruction; this accounts for blob permission storage:
++ *
++ * [00] B0800008 jobhdr: stidx=0 len=8
++ * [01] 14400C08 ld: ccb2-key len=8 offs=12
++ * [02] 087D0784 ptr->@0x087d0784
++ * [03] F8000050 seqoutptr: len=80
++ * [04] 09251BB2 out_ptr->@0x09251bb2
++ * [05] F0000020 seqinptr: len=32
++ * [06] 40000F31 in_ptr->@0x40000f31
++ * [07] 870D0008 operation: encap blob reg=memory, red, sec_mem,
++ * format=normal
++ *
++ * Note: this function only generates 32-bit pointers at present, and should
++ * be refactored using a scheme that allows both 32 and 64 bit addressing
++ */
++
++static int blob_encap_jobdesc(u32 **desc, dma_addr_t keymod,
++ void *secretbuf, dma_addr_t outbuf,
++ u16 secretsz, u8 keycolor, u8 blobtype, u8 auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
++
++ /*
++ * Key modifier works differently for secure/general memory blobs
++ * This accounts for the permission/protection data encapsulated
++ * within the blob if a secure memory blob is requested
++ */
++ if (blobtype == SM_SECMEM)
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_KEY |
++ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK)
++ | (8 & LDST_LEN_MASK);
++ else /* is general memory blob */
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_KEY | (16 & LDST_LEN_MASK);
++
++ tmpdesc[idx++] = (u32)keymod;
++
++ /*
++ * Encapsulation output must include space for blob key encryption
++ * key and MAC tag
++ */
++ tmpdesc[idx++] = CMD_SEQ_OUT_PTR | (secretsz + BLOB_OVERHEAD);
++ tmpdesc[idx++] = (u32)outbuf;
++
++ /* Input data, should be somewhere in secure memory */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | secretsz;
++ tmpdesc[idx++] = (uintptr_t)secretbuf;
++
++ /* Set blob encap, then color */
++ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB;
++
++ if (blobtype == SM_SECMEM)
++ tmpdesc[idx] |= OP_PCL_BLOB_PTXT_SECMEM;
++
++ if (auth == KEY_COVER_CCM)
++ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
++
++ if (keycolor == BLACK_KEY)
++ tmpdesc[idx] |= OP_PCL_BLOB_BLACK;
++
++ idx++;
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++ return dsize;
++}
++
++/*
++ * Construct a blob decapsulation job descriptor
++ *
++ * This function dynamically constructs a blob decapsulation job descriptor
++ * from the following arguments:
++ *
++ * - desc pointer to a pointer to the descriptor generated by this
++ * function. Caller will be responsible to kfree() this
++ * descriptor after execution.
++ * - keymod Physical pointer to a key modifier, which must reside in a
++ * contiguous piece of memory. Modifier will be assumed to be
++ * 8 bytes long for a blob of type SM_SECMEM, or 16 bytes long
++ * for a blob of type SM_GENMEM (see blobtype argument).
++ * - blobbuf Physical pointer (into external memory) of the blob to
++ * be decapsulated. Blob must reside in a contiguous memory
++ * segment.
++ * - outbuf Physical pointer of the decapsulated output, possibly into
++ * a location within a secure memory page. Must be contiguous.
++ * - secretsz Size of encapsulated secret in bytes (not the size of the
++ * input blob).
++ * - keycolor Determines if decapsulated content is encrypted (BLACK_KEY)
++ * or left as plaintext (RED_KEY).
++ * - blobtype Determine if encapsulated blob should be a secure memory
++ * blob (SM_SECMEM), with partition data embedded with key
++ * material, or a general memory blob (SM_GENMEM).
++ * - auth If decapsulation path is specified by BLACK_KEY, then if
++ * AES-CCM is requested for key covering use KEY_COVER_CCM, else
++ * use AES-ECB (KEY_COVER_ECB).
++ *
++ * Upon completion, desc points to a buffer containing a CAAM job descriptor
++ * that decapsulates a key blob from external memory into a black (encrypted)
++ * key or red (plaintext) content.
++ *
++ * This is an example of a black key decapsulation job from a general memory
++ * blob. Notice the 16-byte key modifier in the LOAD instruction.
++ *
++ * [00] B0800008 jobhdr: stidx=0 len=8
++ * [01] 14400010 ld: ccb2-key len=16 offs=0
++ * [02] 08A63B7F ptr->@0x08a63b7f
++ * [03] F8000010 seqoutptr: len=16
++ * [04] 01000000 out_ptr->@0x01000000
++ * [05] F000003A seqinptr: len=58
++ * [06] 01000010 in_ptr->@0x01000010
++ * [07] 860D0004 operation: decap blob reg=memory, black, format=normal
++ *
++ * This is an example of a red key decapsulation job for restoring a red key
++ * from a secure memory blob. Note the 8 byte modifier on the 12 byte offset
++ * in the LOAD instruction:
++ *
++ * [00] B0800008 jobhdr: stidx=0 len=8
++ * [01] 14400C08 ld: ccb2-key len=8 offs=12
++ * [02] 01000000 ptr->@0x01000000
++ * [03] F8000020 seqoutptr: len=32
++ * [04] 400000E6 out_ptr->@0x400000e6
++ * [05] F0000050 seqinptr: len=80
++ * [06] 08F0C0EA in_ptr->@0x08f0c0ea
++ * [07] 860D0008 operation: decap blob reg=memory, red, sec_mem,
++ * format=normal
++ *
++ * Note: this function only generates 32-bit pointers at present, and should
++ * be refactored using a scheme that allows both 32 and 64 bit addressing
++ */
++
++static int blob_decap_jobdesc(u32 **desc, dma_addr_t keymod, dma_addr_t blobbuf,
++ u8 *outbuf, u16 secretsz, u8 keycolor,
++ u8 blobtype, u8 auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
++
++ /* Load key modifier */
++ if (blobtype == SM_SECMEM)
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_KEY |
++ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK)
++ | (8 & LDST_LEN_MASK);
++ else /* is general memory blob */
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_KEY | (16 & LDST_LEN_MASK);
++
++ tmpdesc[idx++] = (u32)keymod;
++
++ /* Compensate BKEK + MAC tag over size of encapsulated secret */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | (secretsz + BLOB_OVERHEAD);
++ tmpdesc[idx++] = (u32)blobbuf;
++ tmpdesc[idx++] = CMD_SEQ_OUT_PTR | secretsz;
++ tmpdesc[idx++] = (uintptr_t)outbuf;
++
++ /* Decapsulate from secure memory partition to black blob */
++ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_DECAP_PROTOCOL | OP_PCLID_BLOB;
++
++ if (blobtype == SM_SECMEM)
++ tmpdesc[idx] |= OP_PCL_BLOB_PTXT_SECMEM;
++
++ if (auth == KEY_COVER_CCM)
++ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
++
++ if (keycolor == BLACK_KEY)
++ tmpdesc[idx] |= OP_PCL_BLOB_BLACK;
++
++ idx++;
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++ return dsize;
++}
++
++/*
++ * Pseudo-synchronous ring access functions for carrying out key
++ * encapsulation and decapsulation
++ */
++
++struct sm_key_job_result {
++ int error;
++ struct completion completion;
++};
++
++void sm_key_job_done(struct device *dev, u32 *desc, u32 err, void *context)
++{
++ struct sm_key_job_result *res = context;
++
++ if (err)
++ caam_jr_strstatus(dev, err);
++
++ res->error = err; /* save off the error for postprocessing */
++
++ complete(&res->completion); /* mark us complete */
++}
++
++static int sm_key_job(struct device *ksdev, u32 *jobdesc)
++{
++ struct sm_key_job_result testres = {0};
++ struct caam_drv_private_sm *kspriv;
++ int rtn = 0;
++
++ kspriv = dev_get_drvdata(ksdev);
++
++ init_completion(&testres.completion);
++
++ rtn = caam_jr_enqueue(kspriv->smringdev, jobdesc, sm_key_job_done,
++ &testres);
++ if (rtn)
++ goto exit;
++
++ wait_for_completion_interruptible(&testres.completion);
++ rtn = testres.error;
++
++exit:
++ return rtn;
++}
++
++/*
++ * Following section establishes the default methods for keystore access
++ * They are NOT intended for use external to this module
++ *
++ * In the present version, these are the only means for the higher-level
++ * interface to deal with the mechanics of accessing the physical keystore
++ */
++
++
++int slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++ u32 i;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_alloc(): requesting slot for %d bytes\n", size);
++#endif
++
++ if (size > smpriv->slot_size)
++ return -EKEYREJECTED;
++
++ for (i = 0; i < ksdata->slot_count; i++) {
++ if (ksdata->slot[i].allocated == 0) {
++ ksdata->slot[i].allocated = 1;
++ (*slot) = i;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_alloc(): new slot %d allocated\n",
++ *slot);
++#endif
++ return 0;
++ }
++ }
++
++ return -ENOSPC;
++}
++EXPORT_SYMBOL(slot_alloc);
++
++int slot_dealloc(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++ u8 __iomem *slotdata;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_dealloc(): releasing slot %d\n", slot);
++#endif
++ if (slot >= ksdata->slot_count)
++ return -EINVAL;
++ slotdata = ksdata->base_address + slot * smpriv->slot_size;
++
++ if (ksdata->slot[slot].allocated == 1) {
++ /* Forcibly overwrite the data from the keystore */
++ memset_io(ksdata->base_address + slot * smpriv->slot_size, 0,
++ smpriv->slot_size);
++
++ ksdata->slot[slot].allocated = 0;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_dealloc(): slot %d released\n", slot);
++#endif
++ return 0;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL(slot_dealloc);
++
++void *slot_get_address(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ if (slot >= ksdata->slot_count)
++ return NULL;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_address(): slot %d is 0x%08x\n", slot,
++ (u32)ksdata->base_address + slot * smpriv->slot_size);
++#endif
++
++ return ksdata->base_address + slot * smpriv->slot_size;
++}
++
++void *slot_get_physical(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ if (slot >= ksdata->slot_count)
++ return NULL;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "%s: slot %d is 0x%08x\n", __func__, slot,
++ (u32)ksdata->phys_address + slot * smpriv->slot_size);
++#endif
++
++ return ksdata->phys_address + slot * smpriv->slot_size;
++}
++
++u32 slot_get_base(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ /*
++ * There could potentially be more than one secure partition object
++ * associated with this keystore. For now, there is just one.
++ */
++
++ (void)slot;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_base(): slot %d = 0x%08x\n",
++ slot, (u32)ksdata->base_address);
++#endif
++
++ return (uintptr_t)(ksdata->base_address);
++}
++
++u32 slot_get_offset(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ if (slot >= ksdata->slot_count)
++ return -EINVAL;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_offset(): slot %d = %d\n", slot,
++ slot * smpriv->slot_size);
++#endif
++
++ return slot * smpriv->slot_size;
++}
++
++u32 slot_get_slot_size(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_slot_size(): slot %d = %d\n", slot,
++ smpriv->slot_size);
++#endif
++ /* All slots are the same size in the default implementation */
++ return smpriv->slot_size;
++}
++
++
++
++int kso_init_data(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *keystore_data = NULL;
++ u32 slot_count;
++ u32 keystore_data_size;
++
++ /*
++ * Calculate the required size of the keystore data structure, based
++ * on the number of keys that can fit in the partition.
++ */
++ slot_count = smpriv->page_size / smpriv->slot_size;
++#ifdef SM_DEBUG
++ dev_info(dev, "kso_init_data: %d slots initializing\n", slot_count);
++#endif
++
++ keystore_data_size = sizeof(struct keystore_data) +
++ slot_count *
++ sizeof(struct keystore_data_slot_info);
++
++ keystore_data = kzalloc(keystore_data_size, GFP_KERNEL);
++
++ if (!keystore_data)
++ return -ENOMEM;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "kso_init_data: keystore data size = %d\n",
++ keystore_data_size);
++#endif
++
++ /*
++ * Place the slot information structure directly after the keystore data
++ * structure.
++ */
++ keystore_data->slot = (struct keystore_data_slot_info *)
++ (keystore_data + 1);
++ keystore_data->slot_count = slot_count;
++
++ smpriv->pagedesc[unit].ksdata = keystore_data;
++ smpriv->pagedesc[unit].ksdata->base_address =
++ smpriv->pagedesc[unit].pg_base;
++ smpriv->pagedesc[unit].ksdata->phys_address =
++ smpriv->pagedesc[unit].pg_phys;
++
++ return 0;
++}
++
++void kso_cleanup_data(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *keystore_data = NULL;
++
++ if (smpriv->pagedesc[unit].ksdata != NULL)
++ keystore_data = smpriv->pagedesc[unit].ksdata;
++
++ /* Release the allocated keystore management data */
++ kfree(smpriv->pagedesc[unit].ksdata);
++
++ return;
++}
++
++
++
++/*
++ * Keystore management section
++ */
++
++void sm_init_keystore(struct device *dev)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++ smpriv->data_init = kso_init_data;
++ smpriv->data_cleanup = kso_cleanup_data;
++ smpriv->slot_alloc = slot_alloc;
++ smpriv->slot_dealloc = slot_dealloc;
++ smpriv->slot_get_address = slot_get_address;
++ smpriv->slot_get_physical = slot_get_physical;
++ smpriv->slot_get_base = slot_get_base;
++ smpriv->slot_get_offset = slot_get_offset;
++ smpriv->slot_get_slot_size = slot_get_slot_size;
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_init_keystore(): handlers installed\n");
++#endif
++}
++EXPORT_SYMBOL(sm_init_keystore);
++
++/* Return available pages/units */
++u32 sm_detect_keystore_units(struct device *dev)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++ return smpriv->localpages;
++}
++EXPORT_SYMBOL(sm_detect_keystore_units);
++
++/*
++ * Do any keystore specific initializations
++ */
++int sm_establish_keystore(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_establish_keystore(): unit %d initializing\n", unit);
++#endif
++
++ if (smpriv->data_init == NULL)
++ return -EINVAL;
++
++ /* Call the data_init function for any user setup */
++ return smpriv->data_init(dev, unit);
++}
++EXPORT_SYMBOL(sm_establish_keystore);
++
++void sm_release_keystore(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_establish_keystore(): unit %d releasing\n", unit);
++#endif
++ if ((smpriv != NULL) && (smpriv->data_cleanup != NULL))
++ smpriv->data_cleanup(dev, unit);
++
++ return;
++}
++EXPORT_SYMBOL(sm_release_keystore);
++
++/*
++ * Subsequent interface (sm_keystore_*) forms the accessor interface to
++ * the keystore
++ */
++int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++
++ spin_lock(&smpriv->kslock);
++
++ if ((smpriv->slot_alloc == NULL) ||
++ (smpriv->pagedesc[unit].ksdata == NULL))
++ goto out;
++
++ retval = smpriv->slot_alloc(dev, unit, size, slot);
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_alloc);
++
++int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++
++ spin_lock(&smpriv->kslock);
++
++ if ((smpriv->slot_alloc == NULL) ||
++ (smpriv->pagedesc[unit].ksdata == NULL))
++ goto out;
++
++ retval = smpriv->slot_dealloc(dev, unit, slot);
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_dealloc);
++
++int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
++ const u8 *key_data, u32 key_length)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ u32 slot_size;
++ u8 __iomem *slot_location;
++
++ spin_lock(&smpriv->kslock);
++
++ slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
++
++ if (key_length > slot_size) {
++ retval = -EFBIG;
++ goto out;
++ }
++
++ slot_location = smpriv->slot_get_address(dev, unit, slot);
++
++ memcpy_toio(slot_location, key_data, key_length);
++
++ retval = 0;
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_load);
++
++int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
++ u32 key_length, u8 *key_data)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ u8 __iomem *slot_addr;
++ u32 slot_size;
++
++ spin_lock(&smpriv->kslock);
++
++ slot_addr = smpriv->slot_get_address(dev, unit, slot);
++ slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
++
++ if (key_length > slot_size) {
++ retval = -EKEYREJECTED;
++ goto out;
++ }
++
++ memcpy_fromio(key_data, slot_addr, key_length);
++ retval = 0;
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_read);
++
++/*
++ * Blacken a clear key in a slot. Operates "in place".
++ * Limited to class 1 keys at the present time
++ */
++int sm_keystore_cover_key(struct device *dev, u32 unit, u32 slot,
++ u16 key_length, u8 keyauth)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = 0;
++ u8 __iomem *slotaddr;
++ void *slotphys;
++ u32 dsize, jstat;
++ u32 __iomem *coverdesc = NULL;
++
++ /* Get the address of the object in the slot */
++ slotaddr = (u8 *)smpriv->slot_get_address(dev, unit, slot);
++ slotphys = (u8 *)smpriv->slot_get_physical(dev, unit, slot);
++
++ dsize = blacken_key_jobdesc(&coverdesc, slotphys, key_length, keyauth);
++ if (!dsize)
++ return -ENOMEM;
++ jstat = sm_key_job(dev, coverdesc);
++ if (jstat)
++ retval = -EIO;
++
++ kfree(coverdesc);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_cover_key);
++
++/* Export a black/red key to a blob in external memory */
++int sm_keystore_slot_export(struct device *dev, u32 unit, u32 slot, u8 keycolor,
++ u8 keyauth, u8 *outbuf, u16 keylen, u8 *keymod)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = 0;
++ u8 __iomem *slotaddr, *lkeymod;
++ u8 __iomem *slotphys;
++ dma_addr_t keymod_dma, outbuf_dma;
++ u32 dsize, jstat;
++ u32 __iomem *encapdesc = NULL;
++ struct device *dev_for_dma_op;
++
++ /* Use the ring as device for DMA operations */
++ dev_for_dma_op = smpriv->smringdev;
++
++ /* Get the base address(es) of the specified slot */
++ slotaddr = (u8 *)smpriv->slot_get_address(dev, unit, slot);
++ slotphys = smpriv->slot_get_physical(dev, unit, slot);
++
++ /* Allocate memory for key modifier compatible with DMA */
++ lkeymod = kmalloc(SECMEM_KEYMOD_LEN, GFP_KERNEL | GFP_DMA);
++ if (!lkeymod) {
++ retval = (-ENOMEM);
++ goto exit;
++ }
++
++ /* Get DMA address for the key modifier */
++ keymod_dma = dma_map_single(dev_for_dma_op, lkeymod,
++ SECMEM_KEYMOD_LEN, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev_for_dma_op, keymod_dma)) {
++ dev_err(dev, "unable to map keymod: %p\n", lkeymod);
++ retval = (-ENOMEM);
++ goto free_keymod;
++ }
++
++ /* Copy the keymod and synchronize the DMA */
++ memcpy(lkeymod, keymod, SECMEM_KEYMOD_LEN);
++ dma_sync_single_for_device(dev_for_dma_op, keymod_dma,
++ SECMEM_KEYMOD_LEN, DMA_TO_DEVICE);
++
++ /* Get DMA address for the destination */
++ outbuf_dma = dma_map_single(dev_for_dma_op, outbuf,
++ keylen + BLOB_OVERHEAD, DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev_for_dma_op, outbuf_dma)) {
++ dev_err(dev, "unable to map outbuf: %p\n", outbuf);
++ retval = (-ENOMEM);
++ goto unmap_keymod;
++ }
++
++ /* Build the encapsulation job descriptor */
++ dsize = blob_encap_jobdesc(&encapdesc, keymod_dma, slotphys, outbuf_dma,
++ keylen, keycolor, SM_SECMEM, keyauth);
++ if (!dsize) {
++ dev_err(dev, "can't alloc an encapsulation descriptor\n");
++ retval = -ENOMEM;
++ goto unmap_outbuf;
++ }
++
++ /* Run the job */
++ jstat = sm_key_job(dev, encapdesc);
++ if (jstat) {
++ retval = (-EIO);
++ goto free_desc;
++ }
++
++ /* Synchronize the data received */
++ dma_sync_single_for_cpu(dev_for_dma_op, outbuf_dma,
++ keylen + BLOB_OVERHEAD, DMA_FROM_DEVICE);
++
++free_desc:
++ kfree(encapdesc);
++
++unmap_outbuf:
++ dma_unmap_single(dev_for_dma_op, outbuf_dma, keylen + BLOB_OVERHEAD,
++ DMA_FROM_DEVICE);
++
++unmap_keymod:
++ dma_unmap_single(dev_for_dma_op, keymod_dma, SECMEM_KEYMOD_LEN,
++ DMA_TO_DEVICE);
++
++free_keymod:
++ kfree(lkeymod);
++
++exit:
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_export);
++
++/* Import a black/red key from a blob residing in external memory */
++int sm_keystore_slot_import(struct device *dev, u32 unit, u32 slot, u8 keycolor,
++ u8 keyauth, u8 *inbuf, u16 keylen, u8 *keymod)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = 0;
++ u8 __iomem *slotaddr, *lkeymod;
++ u8 __iomem *slotphys;
++ dma_addr_t keymod_dma, inbuf_dma;
++ u32 dsize, jstat;
++ u32 __iomem *decapdesc = NULL;
++ struct device *dev_for_dma_op;
++
++ /* Use the ring as device for DMA operations */
++ dev_for_dma_op = smpriv->smringdev;
++
++ /* Get the base address(es) of the specified slot */
++ slotaddr = (u8 *)smpriv->slot_get_address(dev, unit, slot);
++ slotphys = smpriv->slot_get_physical(dev, unit, slot);
++
++ /* Allocate memory for key modifier compatible with DMA */
++ lkeymod = kmalloc(SECMEM_KEYMOD_LEN, GFP_KERNEL | GFP_DMA);
++ if (!lkeymod) {
++ retval = (-ENOMEM);
++ goto exit;
++ }
++
++ /* Get DMA address for the key modifier */
++ keymod_dma = dma_map_single(dev_for_dma_op, lkeymod,
++ SECMEM_KEYMOD_LEN, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev_for_dma_op, keymod_dma)) {
++ dev_err(dev, "unable to map keymod: %p\n", lkeymod);
++ retval = (-ENOMEM);
++ goto free_keymod;
++ }
++
++ /* Copy the keymod and synchronize the DMA */
++ memcpy(lkeymod, keymod, SECMEM_KEYMOD_LEN);
++ dma_sync_single_for_device(dev_for_dma_op, keymod_dma,
++ SECMEM_KEYMOD_LEN, DMA_TO_DEVICE);
++
++ /* Get DMA address for the input */
++ inbuf_dma = dma_map_single(dev_for_dma_op, inbuf,
++ keylen + BLOB_OVERHEAD, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev_for_dma_op, inbuf_dma)) {
++ dev_err(dev, "unable to map inbuf: %p\n", (void *)inbuf_dma);
++ retval = (-ENOMEM);
++ goto unmap_keymod;
++ }
++
++ /* synchronize the DMA */
++ dma_sync_single_for_device(dev_for_dma_op, inbuf_dma,
++ keylen + BLOB_OVERHEAD, DMA_TO_DEVICE);
++
++ /* Build the encapsulation job descriptor */
++ dsize = blob_decap_jobdesc(&decapdesc, keymod_dma, inbuf_dma, slotphys,
++ keylen, keycolor, SM_SECMEM, keyauth);
++ if (!dsize) {
++ dev_err(dev, "can't alloc a decapsulation descriptor\n");
++ retval = -ENOMEM;
++ goto unmap_inbuf;
++ }
++
++ /* Run the job */
++ jstat = sm_key_job(dev, decapdesc);
++
++ /*
++ * May want to expand upon error meanings a bit. Any CAAM status
++	 * is reported as -EIO, but something more meaningful could be
++	 * returned for cases like an ICV error on restore; otherwise the
++	 * caller is left guessing.
++ */
++ if (jstat) {
++ retval = (-EIO);
++ goto free_desc;
++ }
++
++free_desc:
++ kfree(decapdesc);
++
++unmap_inbuf:
++ dma_unmap_single(dev_for_dma_op, inbuf_dma, keylen + BLOB_OVERHEAD,
++ DMA_TO_DEVICE);
++
++unmap_keymod:
++ dma_unmap_single(dev_for_dma_op, keymod_dma, SECMEM_KEYMOD_LEN,
++ DMA_TO_DEVICE);
++
++free_keymod:
++ kfree(lkeymod);
++
++exit:
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_import);
++
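++/*
++ * Illustrative usage of the export/import pair above. This is a sketch
++ * only (variable names are hypothetical), and it assumes the slot has
++ * already been allocated, loaded and covered; keymod must point to
++ * SECMEM_KEYMOD_LEN bytes of key modifier data. sm_test.c below walks
++ * through the complete sequence:
++ *
++ *	blob = kzalloc(keylen + BLOB_OVERHEAD, GFP_KERNEL | GFP_DMA);
++ *	ret = sm_keystore_slot_export(ksdev, unit, slot, BLACK_KEY,
++ *				      KEY_COVER_ECB, blob, keylen, keymod);
++ *	if (!ret)
++ *		ret = sm_keystore_slot_import(ksdev, unit, slot, BLACK_KEY,
++ *					      KEY_COVER_ECB, blob, keylen,
++ *					      keymod);
++ */
++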
++/*
++ * Initialization/shutdown subsystem
++ * Assumes statically-invoked startup/shutdown from the controller driver
++ * for the present time, to be reworked when a device tree becomes
++ * available. This code will not modularize in present form.
++ *
++ * Also, simply uses ring 0 for execution at the present
++ */
++
++int caam_sm_startup(struct platform_device *pdev)
++{
++ struct device *ctrldev, *smdev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_sm *smpriv;
++ struct caam_drv_private_jr *jrpriv; /* need this for reg page */
++ struct platform_device *sm_pdev;
++ struct sm_page_descriptor *lpagedesc;
++ u32 page, pgstat, lpagect, detectedpage, smvid, smpart;
++ int ret = 0;
++	struct device_node *np;
++
++	ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++
++ /*
++ * If ctrlpriv is NULL, it's probably because the caam driver wasn't
++ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
++ */
++ if (!ctrlpriv) {
++ ret = -ENODEV;
++ goto exit;
++ }
++
++ /*
++	 * Set up the private block for secure memory.
++	 * Only one instance is possible.
++ */
++ smpriv = kzalloc(sizeof(struct caam_drv_private_sm), GFP_KERNEL);
++ if (smpriv == NULL) {
++ dev_err(ctrldev, "can't alloc private mem for secure memory\n");
++ ret = -ENOMEM;
++ goto exit;
++ }
++ smpriv->parentdev = ctrldev; /* copy of parent dev is handy */
++ spin_lock_init(&smpriv->kslock);
++
++ /* Create the dev */
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-sm");
++ if (np)
++ of_node_clear_flag(np, OF_POPULATED);
++ sm_pdev = of_platform_device_create(np, "caam_sm", ctrldev);
++
++ if (sm_pdev == NULL) {
++ ret = -EINVAL;
++ goto free_smpriv;
++ }
++
++ /* Save a pointer to the platform device for Secure Memory */
++ smpriv->sm_pdev = sm_pdev;
++ smdev = &sm_pdev->dev;
++ dev_set_drvdata(smdev, smpriv);
++ ctrlpriv->smdev = smdev;
++
++ /* Set the Secure Memory Register Map Version */
++ smvid = rd_reg32(&ctrlpriv->jr[0]->perfmon.smvid);
++ smpart = rd_reg32(&ctrlpriv->jr[0]->perfmon.smpart);
++
++ if (smvid < SMVID_V2)
++ smpriv->sm_reg_offset = SM_V1_OFFSET;
++ else
++ smpriv->sm_reg_offset = SM_V2_OFFSET;
++
++ /*
++	 * Collect configuration limit data for reference.
++	 * This batch comes from the partition data/vid registers in perfmon.
++ */
++ smpriv->max_pages = ((smpart & SMPART_MAX_NUMPG_MASK) >>
++ SMPART_MAX_NUMPG_SHIFT) + 1;
++ smpriv->top_partition = ((smpart & SMPART_MAX_PNUM_MASK) >>
++ SMPART_MAX_PNUM_SHIFT) + 1;
++ smpriv->top_page = ((smpart & SMPART_MAX_PG_MASK) >>
++ SMPART_MAX_PG_SHIFT) + 1;
++ smpriv->page_size = 1024 << ((smvid & SMVID_PG_SIZE_MASK) >>
++ SMVID_PG_SIZE_SHIFT);
++ smpriv->slot_size = 1 << CONFIG_CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE;
++
++#ifdef SM_DEBUG
++ dev_info(smdev, "max pages = %d, top partition = %d\n",
++ smpriv->max_pages, smpriv->top_partition);
++ dev_info(smdev, "top page = %d, page size = %d (total = %d)\n",
++ smpriv->top_page, smpriv->page_size,
++ smpriv->top_page * smpriv->page_size);
++ dev_info(smdev, "selected slot size = %d\n", smpriv->slot_size);
++#endif
++
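++	/*
++	 * As a worked example of the decode above (with hypothetical
++	 * register contents, since the real values are SoC-specific):
++	 * a MAX_NUMPG field of 15 in smpart yields max_pages = 16, and a
++	 * PG_SIZE field of 2 in smvid yields page_size = 1024 << 2 = 4096
++	 * bytes.
++	 */
++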
++ /*
++ * Now probe for partitions/pages to which we have access. Note that
++ * these have likely been set up by a bootloader or platform
++ * provisioning application, so we have to assume that we "inherit"
++ * a configuration and work within the constraints of what it might be.
++ *
++ * Assume use of the zeroth ring in the present iteration (until
++ * we can divorce the controller and ring drivers, and then assign
++ * an SM instance to any ring instance).
++ */
++ smpriv->smringdev = caam_jr_alloc();
++ if (!smpriv->smringdev) {
++ dev_err(smdev, "Device for job ring not created\n");
++ ret = -ENODEV;
++ goto unregister_smpdev;
++ }
++
++ jrpriv = dev_get_drvdata(smpriv->smringdev);
++ lpagect = 0;
++ pgstat = 0;
++	lpagedesc = kcalloc(smpriv->max_pages,
++			    sizeof(struct sm_page_descriptor), GFP_KERNEL);
++ if (lpagedesc == NULL) {
++ ret = -ENOMEM;
++ goto free_smringdev;
++ }
++
++ for (page = 0; page < smpriv->max_pages; page++) {
++ u32 page_ownership;
++
++ if (sm_send_cmd(smpriv, jrpriv,
++ ((page << SMC_PAGE_SHIFT) & SMC_PAGE_MASK) |
++ (SMC_CMD_PAGE_INQUIRY & SMC_CMD_MASK),
++ &pgstat)) {
++ ret = -EINVAL;
++ goto free_lpagedesc;
++ }
++
++ page_ownership = (pgstat & SMCS_PGWON_MASK) >> SMCS_PGOWN_SHIFT;
++ if ((page_ownership == SMCS_PGOWN_OWNED)
++ || (page_ownership == SMCS_PGOWN_NOOWN)) {
++ /* page allocated */
++ lpagedesc[page].phys_pagenum =
++ (pgstat & SMCS_PAGE_MASK) >> SMCS_PAGE_SHIFT;
++ lpagedesc[page].own_part =
++				(pgstat & SMCS_PART_MASK) >> SMCS_PART_SHIFT;
++ lpagedesc[page].pg_base = (u8 *)ctrlpriv->sm_base +
++ (smpriv->page_size * page);
++ if (ctrlpriv->scu_en) {
++			/*
++			 * FIXME: get different addresses viewed by CPU and
++			 * CAAM from platform property
++			 */
++ lpagedesc[page].pg_phys = (u8 *)0x20800000 +
++ (smpriv->page_size * page);
++ } else {
++ lpagedesc[page].pg_phys =
++ (u8 *) ctrlpriv->sm_phy +
++ (smpriv->page_size * page);
++ }
++ lpagect++;
++#ifdef SM_DEBUG
++ dev_info(smdev,
++ "physical page %d, owning partition = %d\n",
++ lpagedesc[page].phys_pagenum,
++ lpagedesc[page].own_part);
++#endif
++ }
++ }
++
++	smpriv->pagedesc = kcalloc(lpagect,
++				   sizeof(struct sm_page_descriptor),
++				   GFP_KERNEL);
++ if (smpriv->pagedesc == NULL) {
++ ret = -ENOMEM;
++ goto free_lpagedesc;
++ }
++ smpriv->localpages = lpagect;
++
++ detectedpage = 0;
++ for (page = 0; page < smpriv->max_pages; page++) {
++		if (lpagedesc[page].pg_base != NULL) { /* i.e. a live entry */
++ memcpy(&smpriv->pagedesc[detectedpage],
++ &lpagedesc[page],
++ sizeof(struct sm_page_descriptor));
++#ifdef SM_DEBUG_CONT
++ sm_show_page(smdev, &smpriv->pagedesc[detectedpage]);
++#endif
++ detectedpage++;
++ }
++ }
++
++ kfree(lpagedesc);
++
++ sm_init_keystore(smdev);
++
++ goto exit;
++
++free_lpagedesc:
++ kfree(lpagedesc);
++free_smringdev:
++ caam_jr_free(smpriv->smringdev);
++unregister_smpdev:
++ of_device_unregister(smpriv->sm_pdev);
++free_smpriv:
++ kfree(smpriv);
++
++exit:
++ return ret;
++}
++
++void caam_sm_shutdown(struct platform_device *pdev)
++{
++ struct device *ctrldev, *smdev;
++ struct caam_drv_private *priv;
++ struct caam_drv_private_sm *smpriv;
++
++ ctrldev = &pdev->dev;
++ priv = dev_get_drvdata(ctrldev);
++ smdev = priv->smdev;
++
++ /* Return if resource not initialized by startup */
++ if (smdev == NULL)
++ return;
++
++ smpriv = dev_get_drvdata(smdev);
++
++ caam_jr_free(smpriv->smringdev);
++
++ /* Remove Secure Memory Platform Device */
++ of_device_unregister(smpriv->sm_pdev);
++
++ kfree(smpriv->pagedesc);
++ kfree(smpriv);
++}
++EXPORT_SYMBOL(caam_sm_shutdown);
++
++static void __exit caam_sm_exit(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return;
++
++ of_node_put(dev_node);
++
++ caam_sm_shutdown(pdev);
++
++ return;
++}
++
++static int __init caam_sm_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ of_node_get(dev_node);
++
++	return caam_sm_startup(pdev);
++}
++
++module_init(caam_sm_init);
++module_exit(caam_sm_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM Secure Memory / Keystore");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
+--- /dev/null
++++ b/drivers/crypto/caam/sm_test.c
+@@ -0,0 +1,571 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Secure Memory / Keystore Exemplification Module
++ *
++ * Copyright 2012-2015 Freescale Semiconductor, Inc.
++ * Copyright 2016-2019 NXP
++ *
++ * This module has been overloaded as an example to show:
++ * - Secure memory subsystem initialization/shutdown
++ * - Allocation/deallocation of "slots" in a secure memory page
++ * - Loading and unloading of key material into slots
++ * - Covering of secure memory objects into "black keys" (ECB only at present)
++ * - Verification of key covering (by differentiation only)
++ * - Exportation of keys into secure memory blobs (with display of result)
++ * - Importation of keys from secure memory blobs (with display of result)
++ * - Verification of re-imported keys where possible.
++ *
++ * The module does not show the use of key objects as working key register
++ * source material at this time.
++ *
++ * This module can use a substantial amount of refactoring, which may occur
++ * after the API gets some mileage. Furthermore, expect this module to
++ * eventually disappear once the API is integrated into "real" software.
++ */
++
++#include "compat.h"
++#include "regs.h"
++#include "intern.h"
++#include "desc.h"
++#include "error.h"
++#include "jr.h"
++#include "sm.h"
++
++/* Fixed known pattern for a key modifier */
++static u8 skeymod[] = {
++ 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
++ 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
++};
++
++/* Fixed known pattern for a key */
++static u8 clrkey[] = {
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x0f, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
++ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
++ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
++ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
++ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
++ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
++ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
++ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
++ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
++ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
++ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
++ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
++ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
++ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
++ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
++ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
++ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
++ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
++ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
++ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
++ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
++ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
++ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
++ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
++ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
++};
++
++static void key_display(struct device *dev, const char *label, u16 size,
++ u8 *key)
++{
++ unsigned i;
++
++ dev_dbg(dev, "%s", label);
++ for (i = 0; i < size; i += 8)
++ dev_dbg(dev,
++ "[%04d] %02x %02x %02x %02x %02x %02x %02x %02x\n",
++ i, key[i], key[i + 1], key[i + 2], key[i + 3],
++ key[i + 4], key[i + 5], key[i + 6], key[i + 7]);
++}
++
++int caam_sm_example_init(struct platform_device *pdev)
++{
++ struct device *ctrldev, *ksdev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_sm *kspriv;
++ u32 unit, units;
++ int rtnval;
++ u8 clrkey8[8], clrkey16[16], clrkey24[24], clrkey32[32];
++ u8 blkkey8[AES_BLOCK_PAD(8)], blkkey16[AES_BLOCK_PAD(16)];
++ u8 blkkey24[AES_BLOCK_PAD(24)], blkkey32[AES_BLOCK_PAD(32)];
++ u8 rstkey8[AES_BLOCK_PAD(8)], rstkey16[AES_BLOCK_PAD(16)];
++ u8 rstkey24[AES_BLOCK_PAD(24)], rstkey32[AES_BLOCK_PAD(32)];
++ u8 __iomem *blob8, *blob16, *blob24, *blob32;
++ u32 keyslot8, keyslot16, keyslot24, keyslot32 = 0;
++
++ blob8 = blob16 = blob24 = blob32 = NULL;
++
++ /*
++ * 3.5.x and later revs for MX6 should be able to ditch this
++ * and detect via dts property
++ */
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++
++ /*
++ * If ctrlpriv is NULL, it's probably because the caam driver wasn't
++ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
++ */
++ if (!ctrlpriv)
++ return -ENODEV;
++
++ ksdev = ctrlpriv->smdev;
++ kspriv = dev_get_drvdata(ksdev);
++ if (kspriv == NULL)
++ return -ENODEV;
++
++	/* What keystores are available? */
++ units = sm_detect_keystore_units(ksdev);
++ if (!units)
++ dev_err(ksdev, "blkkey_ex: no keystore units available\n");
++
++ /*
++ * MX6 bootloader stores some stuff in unit 0, so let's
++ * use 1 or above
++ */
++ if (units < 2) {
++ dev_err(ksdev, "blkkey_ex: insufficient keystore units\n");
++ return -ENODEV;
++ }
++ unit = 1;
++
++ dev_info(ksdev, "blkkey_ex: %d keystore units available\n", units);
++
++ /* Initialize/Establish Keystore */
++	sm_establish_keystore(ksdev, unit);	/* Initialize store in #1 */
++
++ /*
++ * Now let's set up buffers for blobs in DMA-able memory. All are
++	 * larger than they need to be so that the blob size can be seen.
++ */
++ blob8 = kzalloc(128, GFP_KERNEL | GFP_DMA);
++ blob16 = kzalloc(128, GFP_KERNEL | GFP_DMA);
++ blob24 = kzalloc(128, GFP_KERNEL | GFP_DMA);
++ blob32 = kzalloc(128, GFP_KERNEL | GFP_DMA);
++
++ if ((blob8 == NULL) || (blob16 == NULL) || (blob24 == NULL) ||
++ (blob32 == NULL)) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "blkkey_ex: can't get blob buffers\n");
++ goto freemem;
++ }
++
++ /* Initialize clear keys with a known and recognizable pattern */
++ memcpy(clrkey8, clrkey, 8);
++ memcpy(clrkey16, clrkey, 16);
++ memcpy(clrkey24, clrkey, 24);
++ memcpy(clrkey32, clrkey, 32);
++
++ memset(blkkey8, 0, AES_BLOCK_PAD(8));
++ memset(blkkey16, 0, AES_BLOCK_PAD(16));
++ memset(blkkey24, 0, AES_BLOCK_PAD(24));
++ memset(blkkey32, 0, AES_BLOCK_PAD(32));
++
++ memset(rstkey8, 0, AES_BLOCK_PAD(8));
++ memset(rstkey16, 0, AES_BLOCK_PAD(16));
++ memset(rstkey24, 0, AES_BLOCK_PAD(24));
++ memset(rstkey32, 0, AES_BLOCK_PAD(32));
++
++ /*
++ * Allocate keyslots. Since we're going to blacken keys in-place,
++ * we want slots big enough to pad out to the next larger AES blocksize
++ * so pad them out.
++ */
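++	/*
++	 * For reference, AES_BLOCK_PAD() rounds a length up to a whole
++	 * number of 16-byte AES blocks (illustrative: AES_BLOCK_PAD(24) is
++	 * 32, while the 16- and 32-byte keys keep their natural size); see
++	 * the macro definition for the exact behavior.
++	 */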
++ rtnval = sm_keystore_slot_alloc(ksdev, unit, AES_BLOCK_PAD(8),
++ &keyslot8);
++ if (rtnval)
++ goto freemem;
++
++ rtnval = sm_keystore_slot_alloc(ksdev, unit, AES_BLOCK_PAD(16),
++ &keyslot16);
++ if (rtnval)
++ goto dealloc_slot8;
++
++ rtnval = sm_keystore_slot_alloc(ksdev, unit, AES_BLOCK_PAD(24),
++ &keyslot24);
++ if (rtnval)
++ goto dealloc_slot16;
++
++ rtnval = sm_keystore_slot_alloc(ksdev, unit, AES_BLOCK_PAD(32),
++ &keyslot32);
++ if (rtnval)
++ goto dealloc_slot24;
++
++
++ /* Now load clear key data into the newly allocated slots */
++ rtnval = sm_keystore_slot_load(ksdev, unit, keyslot8, clrkey8, 8);
++ if (rtnval)
++ goto dealloc;
++
++ rtnval = sm_keystore_slot_load(ksdev, unit, keyslot16, clrkey16, 16);
++ if (rtnval)
++ goto dealloc;
++
++ rtnval = sm_keystore_slot_load(ksdev, unit, keyslot24, clrkey24, 24);
++ if (rtnval)
++ goto dealloc;
++
++ rtnval = sm_keystore_slot_load(ksdev, unit, keyslot32, clrkey32, 32);
++ if (rtnval)
++ goto dealloc;
++
++ /*
++ * All cleartext keys are loaded into slots (in an unprotected
++ * partition at this time)
++ *
++ * Cover keys in-place
++ */
++ rtnval = sm_keystore_cover_key(ksdev, unit, keyslot8, 8, KEY_COVER_ECB);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't cover 64-bit key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_cover_key(ksdev, unit, keyslot16, 16,
++ KEY_COVER_ECB);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't cover 128-bit key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_cover_key(ksdev, unit, keyslot24, 24,
++ KEY_COVER_ECB);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't cover 192-bit key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_cover_key(ksdev, unit, keyslot32, 32,
++ KEY_COVER_ECB);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't cover 256-bit key\n");
++ goto dealloc;
++ }
++
++ /*
++ * Keys should be covered and appear sufficiently "random"
++ * as a result of the covering (blackening) process. Assuming
++ * non-secure mode, read them back out for examination; they should
++ * appear as random data, completely differing from the clear
++ * inputs. So, this will read them back from secure memory and
++ * compare them. If they match the clear key, then the covering
++ * operation didn't occur.
++ */
++
++ rtnval = sm_keystore_slot_read(ksdev, unit, keyslot8, AES_BLOCK_PAD(8),
++ blkkey8);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't read 64-bit black key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_read(ksdev, unit, keyslot16,
++ AES_BLOCK_PAD(16), blkkey16);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't read 128-bit black key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_read(ksdev, unit, keyslot24,
++ AES_BLOCK_PAD(24), blkkey24);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't read 192-bit black key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_read(ksdev, unit, keyslot32,
++ AES_BLOCK_PAD(32), blkkey32);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't read 256-bit black key\n");
++ goto dealloc;
++ }
++
++ rtnval = -EINVAL;
++ if (!memcmp(blkkey8, clrkey8, 8)) {
++ dev_err(ksdev, "blkkey_ex: 64-bit key cover failed\n");
++ goto dealloc;
++ }
++
++ if (!memcmp(blkkey16, clrkey16, 16)) {
++ dev_err(ksdev, "blkkey_ex: 128-bit key cover failed\n");
++ goto dealloc;
++ }
++
++ if (!memcmp(blkkey24, clrkey24, 24)) {
++ dev_err(ksdev, "blkkey_ex: 192-bit key cover failed\n");
++ goto dealloc;
++ }
++
++ if (!memcmp(blkkey32, clrkey32, 32)) {
++ dev_err(ksdev, "blkkey_ex: 256-bit key cover failed\n");
++ goto dealloc;
++ }
++
++
++ key_display(ksdev, "64-bit clear key:", 8, clrkey8);
++ key_display(ksdev, "64-bit black key:", AES_BLOCK_PAD(8), blkkey8);
++
++ key_display(ksdev, "128-bit clear key:", 16, clrkey16);
++ key_display(ksdev, "128-bit black key:", AES_BLOCK_PAD(16), blkkey16);
++
++ key_display(ksdev, "192-bit clear key:", 24, clrkey24);
++ key_display(ksdev, "192-bit black key:", AES_BLOCK_PAD(24), blkkey24);
++
++ key_display(ksdev, "256-bit clear key:", 32, clrkey32);
++ key_display(ksdev, "256-bit black key:", AES_BLOCK_PAD(32), blkkey32);
++
++ /*
++	 * Now encapsulate all keys as SM blobs out to external memory.
++ * Blobs will appear as random-looking blocks of data different
++ * from the original source key, and 48 bytes longer than the
++ * original key, to account for the extra data encapsulated within.
++ */
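++	/*
++	 * E.g. the 8-byte key exported below should come back as an
++	 * 8 + 48 = 56-byte blob, comfortably within the 96 bytes displayed
++	 * here and the 128-byte buffers allocated above.
++	 */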
++ key_display(ksdev, "64-bit unwritten blob:", 96, blob8);
++ key_display(ksdev, "128-bit unwritten blob:", 96, blob16);
++ key_display(ksdev, "196-bit unwritten blob:", 96, blob24);
++ key_display(ksdev, "256-bit unwritten blob:", 96, blob32);
++
++ rtnval = sm_keystore_slot_export(ksdev, unit, keyslot8, BLACK_KEY,
++ KEY_COVER_ECB, blob8, 8, skeymod);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't encapsulate 64-bit key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_export(ksdev, unit, keyslot16, BLACK_KEY,
++ KEY_COVER_ECB, blob16, 16, skeymod);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't encapsulate 128-bit key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_export(ksdev, unit, keyslot24, BLACK_KEY,
++ KEY_COVER_ECB, blob24, 24, skeymod);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't encapsulate 192-bit key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_export(ksdev, unit, keyslot32, BLACK_KEY,
++ KEY_COVER_ECB, blob32, 32, skeymod);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't encapsulate 256-bit key\n");
++ goto dealloc;
++ }
++
++ key_display(ksdev, "64-bit black key in blob:", 96, blob8);
++ key_display(ksdev, "128-bit black key in blob:", 96, blob16);
++ key_display(ksdev, "192-bit black key in blob:", 96, blob24);
++ key_display(ksdev, "256-bit black key in blob:", 96, blob32);
++
++ /*
++ * Now re-import black keys from secure-memory blobs stored
++ * in general memory from the previous operation. Since we are
++ * working with black keys, and since power has not cycled, the
++ * restored black keys should match the original blackened keys
++	 * (this would not be true if the blobs were saved in some
++	 * non-volatile store, and power was cycled between the save and
++	 * restore).
++ */
++ rtnval = sm_keystore_slot_import(ksdev, unit, keyslot8, BLACK_KEY,
++ KEY_COVER_ECB, blob8, 8, skeymod);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't decapsulate 64-bit blob\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_import(ksdev, unit, keyslot16, BLACK_KEY,
++ KEY_COVER_ECB, blob16, 16, skeymod);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't decapsulate 128-bit blob\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_import(ksdev, unit, keyslot24, BLACK_KEY,
++ KEY_COVER_ECB, blob24, 24, skeymod);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't decapsulate 196-bit blob\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_import(ksdev, unit, keyslot32, BLACK_KEY,
++ KEY_COVER_ECB, blob32, 32, skeymod);
++ if (rtnval) {
++ dev_err(ksdev, "blkkey_ex: can't decapsulate 256-bit blob\n");
++ goto dealloc;
++ }
++
++
++ /*
++ * Blobs are now restored as black keys. Read those black keys back
++	 * for comparison with the original black keys; they should match.
++ */
++ rtnval = sm_keystore_slot_read(ksdev, unit, keyslot8, AES_BLOCK_PAD(8),
++ rstkey8);
++ if (rtnval) {
++ dev_err(ksdev,
++ "blkkey_ex: can't read restored 64-bit black key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_read(ksdev, unit, keyslot16,
++ AES_BLOCK_PAD(16), rstkey16);
++ if (rtnval) {
++ dev_err(ksdev,
++ "blkkey_ex: can't read restored 128-bit black key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_read(ksdev, unit, keyslot24,
++ AES_BLOCK_PAD(24), rstkey24);
++ if (rtnval) {
++ dev_err(ksdev,
++ "blkkey_ex: can't read restored 196-bit black key\n");
++ goto dealloc;
++ }
++
++ rtnval = sm_keystore_slot_read(ksdev, unit, keyslot32,
++ AES_BLOCK_PAD(32), rstkey32);
++ if (rtnval) {
++ dev_err(ksdev,
++ "blkkey_ex: can't read restored 256-bit black key\n");
++ goto dealloc;
++ }
++
++ key_display(ksdev, "restored 64-bit black key:", AES_BLOCK_PAD(8),
++ rstkey8);
++ key_display(ksdev, "restored 128-bit black key:", AES_BLOCK_PAD(16),
++ rstkey16);
++ key_display(ksdev, "restored 192-bit black key:", AES_BLOCK_PAD(24),
++ rstkey24);
++ key_display(ksdev, "restored 256-bit black key:", AES_BLOCK_PAD(32),
++ rstkey32);
++
++ /*
++	 * Compare the restored black keys with the original blackened keys.
++	 * As long as we're operating within the same power cycle, a black
++	 * key restored from a blob should match the original black key IF
++	 * the key happens to be of a size that is a multiple of the AES
++	 * blocksize. Any key that had to be padded out to the blocksize
++	 * will not match in full; only its leading full AES blocks will
++	 * match (assuming ECB).
++	 *
++	 * Therefore, compare the 16- and 32-byte keys; they should match.
++	 * The 24-byte key can only match within its first 16-byte block.
++ */
++
++ if (memcmp(rstkey16, blkkey16, AES_BLOCK_PAD(16))) {
++ dev_err(ksdev, "blkkey_ex: 128-bit restored key mismatch\n");
++ rtnval = -EINVAL;
++ }
++
++ /* Only first AES block will match, remainder subject to padding */
++ if (memcmp(rstkey24, blkkey24, 16)) {
++ dev_err(ksdev, "blkkey_ex: 192-bit restored key mismatch\n");
++ rtnval = -EINVAL;
++ }
++
++ if (memcmp(rstkey32, blkkey32, AES_BLOCK_PAD(32))) {
++ dev_err(ksdev, "blkkey_ex: 256-bit restored key mismatch\n");
++ rtnval = -EINVAL;
++ }
++
++
++ /* Remove keys from keystore */
++dealloc:
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot32);
++dealloc_slot24:
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot24);
++dealloc_slot16:
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot16);
++dealloc_slot8:
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot8);
++
++ /* Free resources */
++freemem:
++ kfree(blob8);
++ kfree(blob16);
++ kfree(blob24);
++ kfree(blob32);
++
++ /* Disconnect from keystore and leave */
++ sm_release_keystore(ksdev, unit);
++
++ return rtnval;
++}
++EXPORT_SYMBOL(caam_sm_example_init);
++
++void caam_sm_example_shutdown(void)
++{
++ /* unused in present version */
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return;
++
++ of_node_get(dev_node);
++
++}
++
++static int __init caam_sm_test_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++ int ret;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ of_node_put(dev_node);
++
++ ret = caam_sm_example_init(pdev);
++ if (ret)
++ dev_err(&pdev->dev, "SM test failed: %d\n", ret);
++ else
++ dev_info(&pdev->dev, "SM test passed\n");
++
++ return ret;
++}
++
++
++/* Module-based initialization needs to wait for dev tree */
++#ifdef CONFIG_OF
++module_init(caam_sm_test_init);
++module_exit(caam_sm_example_shutdown);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM Black Key Usage Example");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
++#endif