author    Yangbo Lu <yangbo.lu@nxp.com>    2020-04-10 10:47:05 +0800
committer Petr Štetiar <ynezz@true.cz>     2020-05-07 12:53:06 +0200
commit    cddd4591404fb4c53dc0b3c0b15b942cdbed4356 (patch)
tree      392c1179de46b0f804e3789edca19069b64e6b44 /target/linux/layerscape/patches-5.4/804-crypto-0003-crypto-caam-use-mapped_-src-dst-_nents-for-descripto.patch
parent    d1d2c0b5579ea4f69a42246c9318539d61ba1999 (diff)
layerscape: add patches-5.4
Add patches for linux-5.4. The patches are from the NXP LSDK-20.04 release,
which was tagged LSDK-20.04-V5.4.
https://source.codeaurora.org/external/qoriq/qoriq-components/linux/

For the LS1021A-IOT and Traverse-LS1043 boards, which are not covered by LSDK,
the dts patches are ported from 4.14.

The patches are sorted into the following categories:

301-arch-xxxx
302-dts-xxxx
303-core-xxxx
701-net-xxxx
801-audio-xxxx
802-can-xxxx
803-clock-xxxx
804-crypto-xxxx
805-display-xxxx
806-dma-xxxx
807-gpio-xxxx
808-i2c-xxxx
809-jailhouse-xxxx
810-keys-xxxx
811-kvm-xxxx
812-pcie-xxxx
813-pm-xxxx
814-qe-xxxx
815-sata-xxxx
816-sdhc-xxxx
817-spi-xxxx
818-thermal-xxxx
819-uart-xxxx
820-usb-xxxx
821-vfio-xxxx

Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Diffstat (limited to 'target/linux/layerscape/patches-5.4/804-crypto-0003-crypto-caam-use-mapped_-src-dst-_nents-for-descripto.patch')
-rw-r--r--   target/linux/layerscape/patches-5.4/804-crypto-0003-crypto-caam-use-mapped_-src-dst-_nents-for-descripto.patch   227
1 file changed, 227 insertions, 0 deletions
diff --git a/target/linux/layerscape/patches-5.4/804-crypto-0003-crypto-caam-use-mapped_-src-dst-_nents-for-descripto.patch b/target/linux/layerscape/patches-5.4/804-crypto-0003-crypto-caam-use-mapped_-src-dst-_nents-for-descripto.patch
new file mode 100644
index 0000000000..4e1efc858b
--- /dev/null
+++ b/target/linux/layerscape/patches-5.4/804-crypto-0003-crypto-caam-use-mapped_-src-dst-_nents-for-descripto.patch
@@ -0,0 +1,227 @@
+From e640f4bcfa0088ff696bc5da6063a1ea8d782189 Mon Sep 17 00:00:00 2001
+From: Iuliana Prodan <iuliana.prodan@nxp.com>
+Date: Thu, 26 Sep 2019 15:26:29 +0300
+Subject: [PATCH] crypto: caam - use mapped_{src,dst}_nents for descriptor
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The mapped_{src,dst}_nents _returned_ from the dma_map_sg
+call (which could be less than src/dst_nents) have to be
+used to generate the job descriptors.
+
+Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+(cherry picked from commit eff9771d51529acf7f6f58a60b2923b98da28f0e)
+---
+ drivers/crypto/caam/caampkc.c | 72 +++++++++++++++++++++++--------------------
+ drivers/crypto/caam/caampkc.h | 8 +++--
+ 2 files changed, 45 insertions(+), 35 deletions(-)
+
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -252,9 +252,9 @@ static struct rsa_edesc *rsa_edesc_alloc
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
+- int sgc;
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ int src_nents, dst_nents;
++ int mapped_src_nents, mapped_dst_nents;
+ unsigned int diff_size = 0;
+ int lzeros;
+
+@@ -285,13 +285,27 @@ static struct rsa_edesc *rsa_edesc_alloc
+ req_ctx->fixup_src_len);
+ dst_nents = sg_nents_for_len(req->dst, req->dst_len);
+
+- if (!diff_size && src_nents == 1)
++ mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
++ DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(dev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(dev, "unable to map destination\n");
++ goto src_fail;
++ }
++
++ if (!diff_size && mapped_src_nents == 1)
+ sec4_sg_len = 0; /* no need for an input hw s/g table */
+ else
+- sec4_sg_len = src_nents + !!diff_size;
++ sec4_sg_len = mapped_src_nents + !!diff_size;
+ sec4_sg_index = sec4_sg_len;
+- if (dst_nents > 1)
+- sec4_sg_len += pad_sg_nents(dst_nents);
++
++ if (mapped_dst_nents > 1)
++ sec4_sg_len += pad_sg_nents(mapped_dst_nents);
+ else
+ sec4_sg_len = pad_sg_nents(sec4_sg_len);
+
+@@ -301,19 +315,7 @@ static struct rsa_edesc *rsa_edesc_alloc
+ edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
+ GFP_DMA | flags);
+ if (!edesc)
+- return ERR_PTR(-ENOMEM);
+-
+- sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
+- if (unlikely(!sgc)) {
+- dev_err(dev, "unable to map source\n");
+- goto src_fail;
+- }
+-
+- sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+- if (unlikely(!sgc)) {
+- dev_err(dev, "unable to map destination\n");
+ goto dst_fail;
+- }
+
+ edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
+ if (diff_size)
+@@ -324,7 +326,7 @@ static struct rsa_edesc *rsa_edesc_alloc
+ sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
+ edesc->sec4_sg + !!diff_size, 0);
+
+- if (dst_nents > 1)
++ if (mapped_dst_nents > 1)
+ sg_to_sec4_sg_last(req->dst, req->dst_len,
+ edesc->sec4_sg + sec4_sg_index, 0);
+
+@@ -335,6 +337,9 @@ static struct rsa_edesc *rsa_edesc_alloc
+ if (!sec4_sg_bytes)
+ return edesc;
+
++ edesc->mapped_src_nents = mapped_src_nents;
++ edesc->mapped_dst_nents = mapped_dst_nents;
++
+ edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
+@@ -351,11 +356,11 @@ static struct rsa_edesc *rsa_edesc_alloc
+ return edesc;
+
+ sec4_sg_fail:
+- dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
++ kfree(edesc);
+ dst_fail:
+- dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
++ dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+ src_fail:
+- kfree(edesc);
++ dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -383,15 +388,15 @@ static int set_rsa_pub_pdb(struct akciph
+ return -ENOMEM;
+ }
+
+- if (edesc->src_nents > 1) {
++ if (edesc->mapped_src_nents > 1) {
+ pdb->sgf |= RSA_PDB_SGF_F;
+ pdb->f_dma = edesc->sec4_sg_dma;
+- sec4_sg_index += edesc->src_nents;
++ sec4_sg_index += edesc->mapped_src_nents;
+ } else {
+ pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
+ }
+
+- if (edesc->dst_nents > 1) {
++ if (edesc->mapped_dst_nents > 1) {
+ pdb->sgf |= RSA_PDB_SGF_G;
+ pdb->g_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+@@ -428,17 +433,18 @@ static int set_rsa_priv_f1_pdb(struct ak
+ return -ENOMEM;
+ }
+
+- if (edesc->src_nents > 1) {
++ if (edesc->mapped_src_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+ pdb->g_dma = edesc->sec4_sg_dma;
+- sec4_sg_index += edesc->src_nents;
++ sec4_sg_index += edesc->mapped_src_nents;
++
+ } else {
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
+ }
+
+- if (edesc->dst_nents > 1) {
++ if (edesc->mapped_dst_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+ pdb->f_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+@@ -493,17 +499,17 @@ static int set_rsa_priv_f2_pdb(struct ak
+ goto unmap_tmp1;
+ }
+
+- if (edesc->src_nents > 1) {
++ if (edesc->mapped_src_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+ pdb->g_dma = edesc->sec4_sg_dma;
+- sec4_sg_index += edesc->src_nents;
++ sec4_sg_index += edesc->mapped_src_nents;
+ } else {
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
+ }
+
+- if (edesc->dst_nents > 1) {
++ if (edesc->mapped_dst_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+ pdb->f_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+@@ -582,17 +588,17 @@ static int set_rsa_priv_f3_pdb(struct ak
+ goto unmap_tmp1;
+ }
+
+- if (edesc->src_nents > 1) {
++ if (edesc->mapped_src_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+ pdb->g_dma = edesc->sec4_sg_dma;
+- sec4_sg_index += edesc->src_nents;
++ sec4_sg_index += edesc->mapped_src_nents;
+ } else {
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+
+ pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
+ }
+
+- if (edesc->dst_nents > 1) {
++ if (edesc->mapped_dst_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+ pdb->f_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+--- a/drivers/crypto/caam/caampkc.h
++++ b/drivers/crypto/caam/caampkc.h
+@@ -112,8 +112,10 @@ struct caam_rsa_req_ctx {
+
+ /**
+ * rsa_edesc - s/w-extended rsa descriptor
+- * @src_nents : number of segments in input scatterlist
+- * @dst_nents : number of segments in output scatterlist
++ * @src_nents : number of segments in input s/w scatterlist
++ * @dst_nents : number of segments in output s/w scatterlist
++ * @mapped_src_nents: number of segments in input h/w link table
++ * @mapped_dst_nents: number of segments in output h/w link table
+ * @sec4_sg_bytes : length of h/w link table
+ * @sec4_sg_dma : dma address of h/w link table
+ * @sec4_sg : pointer to h/w link table
+@@ -123,6 +125,8 @@ struct caam_rsa_req_ctx {
+ struct rsa_edesc {
+ int src_nents;
+ int dst_nents;
++ int mapped_src_nents;
++ int mapped_dst_nents;
+ int sec4_sg_bytes;
+ dma_addr_t sec4_sg_dma;
+ struct sec4_sg_entry *sec4_sg;
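
For context on the DMA API detail the patch depends on: dma_map_sg() returns the number of entries it actually mapped, which can be smaller than the scatterlist entry count passed in (an IOMMU may coalesce segments), and a return value of 0 means the mapping failed. The following minimal sketch is not taken from caampkc.c; the function name example_build_desc and its parameters are invented for illustration. It shows why hardware S/G descriptors must be sized from the mapped count, while dma_unmap_sg() still takes the original software count.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only, not part of the CAAM driver. Shows the rule the
 * patch enforces: hardware S/G tables are sized and indexed by the
 * count dma_map_sg() returns, while dma_unmap_sg() is still called
 * with the original software scatterlist entry count.
 */
static int example_build_desc(struct device *dev, struct scatterlist *src,
			      unsigned int src_len)
{
	int src_nents, mapped_src_nents;

	/* Entries in the s/w scatterlist covering src_len bytes. */
	src_nents = sg_nents_for_len(src, src_len);
	if (src_nents < 0)
		return src_nents;

	/*
	 * May return fewer entries than src_nents if segments were
	 * coalesced; returns 0 on mapping failure.
	 */
	mapped_src_nents = dma_map_sg(dev, src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents))
		return -ENOMEM;

	if (mapped_src_nents > 1) {
		/* Build a h/w link table with mapped_src_nents entries. */
	} else {
		/* Single mapped segment: use sg_dma_address(src) directly. */
	}

	/* Unmapping uses the original src_nents, not the mapped count. */
	dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
	return 0;
}

This is the split the patch introduces in struct rsa_edesc: src_nents/dst_nents keep tracking the software scatterlists for unmapping on the error paths, while mapped_src_nents/mapped_dst_nents drive the sec4_sg link-table layout and the PDB setup.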