Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig | 7
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 2
-rw-r--r--  drivers/char/hw_random/cavium-rng-vf.c | 2
-rw-r--r--  drivers/char/hw_random/cavium-rng.c | 2
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 2
-rw-r--r--  drivers/char/hw_random/ks-sa-rng.c | 257
-rw-r--r--  drivers/char/hw_random/mxc-rnga.c | 23
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 22
-rw-r--r--  drivers/char/hw_random/stm32-rng.c | 44
-rw-r--r--  drivers/crypto/Kconfig | 34
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/crypto/atmel-aes.c | 8
-rw-r--r--  drivers/crypto/atmel-sha.c | 9
-rw-r--r--  drivers/crypto/atmel-tdes.c | 9
-rw-r--r--  drivers/crypto/bcm/cipher.c | 4
-rw-r--r--  drivers/crypto/bcm/util.c | 1
-rw-r--r--  drivers/crypto/bfin_crc.c | 743
-rw-r--r--  drivers/crypto/bfin_crc.h | 124
-rw-r--r--  drivers/crypto/caam/caamalg.c | 21
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 165
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 24
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 388
-rw-r--r--  drivers/crypto/caam/ctrl.c | 42
-rw-r--r--  drivers/crypto/caam/qi.c | 11
-rw-r--r--  drivers/crypto/cavium/cpt/cptpf_main.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-rsa.c | 7
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-debugfs.c | 7
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 108
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 15
-rw-r--r--  drivers/crypto/ccp/sp-dev.c | 6
-rw-r--r--  drivers/crypto/ccree/Makefile | 7
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 2718
-rw-r--r--  drivers/crypto/ccree/cc_aead.h | 109
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c | 1651
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.h | 71
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 1150
-rw-r--r--  drivers/crypto/ccree/cc_cipher.h | 59
-rw-r--r--  drivers/crypto/ccree/cc_crypto_ctx.h | 133
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.c | 101
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.h | 32
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 518
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 208
-rw-r--r--  drivers/crypto/ccree/cc_fips.c | 120
-rw-r--r--  drivers/crypto/ccree/cc_fips.h | 36
-rw-r--r--  drivers/crypto/ccree/cc_hash.c | 2296
-rw-r--r--  drivers/crypto/ccree/cc_hash.h | 109
-rw-r--r--  drivers/crypto/ccree/cc_host_regs.h | 145
-rw-r--r--  drivers/crypto/ccree/cc_hw_queue_defs.h | 576
-rw-r--r--  drivers/crypto/ccree/cc_ivgen.c | 279
-rw-r--r--  drivers/crypto/ccree/cc_ivgen.h | 55
-rw-r--r--  drivers/crypto/ccree/cc_kernel_regs.h | 168
-rw-r--r--  drivers/crypto/ccree/cc_lli_defs.h | 59
-rw-r--r--  drivers/crypto/ccree/cc_pm.c | 122
-rw-r--r--  drivers/crypto/ccree/cc_pm.h | 56
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.c | 711
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.h | 51
-rw-r--r--  drivers/crypto/ccree/cc_sram_mgr.c | 120
-rw-r--r--  drivers/crypto/ccree/cc_sram_mgr.h | 65
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 577
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 11
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 6
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 31
-rw-r--r--  drivers/crypto/chelsio/chcr_ipsec.c | 5
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 114
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 22
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 5
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 258
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 2
-rw-r--r--  drivers/crypto/marvell/cesa.c | 1
-rw-r--r--  drivers/crypto/mxs-dcp.c | 14
-rw-r--r--  drivers/crypto/n2_core.c | 12
-rw-r--r--  drivers/crypto/nx/nx-842-pseries.c | 5
-rw-r--r--  drivers/crypto/omap-aes.c | 112
-rw-r--r--  drivers/crypto/omap-aes.h | 3
-rw-r--r--  drivers/crypto/omap-crypto.c | 4
-rw-r--r--  drivers/crypto/omap-des.c | 24
-rw-r--r--  drivers/crypto/omap-sham.c | 106
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 3
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 9
-rw-r--r--  drivers/crypto/s5p-sss.c | 34
-rw-r--r--  drivers/crypto/sahara.c | 6
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 964
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 41
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-core.c | 1
-rw-r--r--  drivers/crypto/talitos.c | 218
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 14
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 18
-rw-r--r--  drivers/crypto/virtio/Kconfig | 1
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 16
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h | 4
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 3
-rw-r--r--  drivers/staging/ccree/Kconfig | 4
-rw-r--r--  drivers/staging/ccree/Makefile | 2
98 files changed, 14858 insertions(+), 1617 deletions(-)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 4d0f571c15f9..d53541e96bee 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -452,3 +452,10 @@ config UML_RANDOM
(check your distro, or download from
http://sourceforge.net/projects/gkernel/). rngd periodically reads
/dev/hwrng and injects the entropy into /dev/random.
+
+config HW_RANDOM_KEYSTONE
+ depends on ARCH_KEYSTONE
+ default HW_RANDOM
+ tristate "TI Keystone NETCP SA Hardware random number generator"
+ help
+ This option enables Keystone's hardware random generator.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index b780370bd4eb..533e913c93d1 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -38,3 +38,4 @@ obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
+obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 7a84cec30c3a..6767d965c36c 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -163,6 +163,8 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
/* Clock is optional on most platforms */
priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
priv->rng.name = pdev->name;
priv->rng.init = bcm2835_rng_init;
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index dd1007aecb10..2d1352b67168 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -77,7 +77,7 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
}
/* Remove the VF */
-void cavium_rng_remove_vf(struct pci_dev *pdev)
+static void cavium_rng_remove_vf(struct pci_dev *pdev)
{
struct cavium_rng *rng;
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
index a944e0a47f42..63d6e68c24d2 100644
--- a/drivers/char/hw_random/cavium-rng.c
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -62,7 +62,7 @@ static int cavium_rng_probe(struct pci_dev *pdev,
}
/* Disable VF and RNG Hardware */
-void cavium_rng_remove(struct pci_dev *pdev)
+static void cavium_rng_remove(struct pci_dev *pdev)
{
struct cavium_rng_pf *rng;
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index eca87249bcff..250123bc4905 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -300,7 +300,7 @@ static int __maybe_unused imx_rngc_resume(struct device *dev)
return 0;
}
-SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
+static SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
static const struct of_device_id imx_rngc_dt_ids[] = {
{ .compatible = "fsl,imx25-rngb", .data = NULL, },
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
new file mode 100644
index 000000000000..62c6696c1dbd
--- /dev/null
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -0,0 +1,257 @@
+/*
+ * Random Number Generator driver for the Keystone SOC
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Sandeep Nair
+ * Vitaly Andrianov
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+#define SA_CMD_STATUS_OFS 0x8
+
+/* TRNG enable control in SA System module */
+#define SA_CMD_STATUS_REG_TRNG_ENABLE BIT(3)
+
+/* TRNG start control in TRNG module */
+#define TRNG_CNTL_REG_TRNG_ENABLE BIT(10)
+
+/* Data ready indicator in STATUS register */
+#define TRNG_STATUS_REG_READY BIT(0)
+
+/* Data ready clear control in INTACK register */
+#define TRNG_INTACK_REG_READY BIT(0)
+
+/*
+ * Number of samples taken to gather entropy during startup.
+ * If value is 0, the number of samples is 2^24 else
+ * equals value times 2^8.
+ */
+#define TRNG_DEF_STARTUP_CYCLES 0
+#define TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT 16
+
+/*
+ * Minimum number of samples taken to regenerate entropy
+ * If value is 0, the number of samples is 2^24 else
+ * equals value times 2^6.
+ */
+#define TRNG_DEF_MIN_REFILL_CYCLES 1
+#define TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT 0
+
+/*
+ * Maximum number of samples taken to regenerate entropy
+ * If value is 0, the number of samples is 2^24 else
+ * equals value times 2^8.
+ */
+#define TRNG_DEF_MAX_REFILL_CYCLES 0
+#define TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT 16
+
+/* Number of CLK input cycles between samples */
+#define TRNG_DEF_CLK_DIV_CYCLES 0
+#define TRNG_CFG_REG_SAMPLE_DIV_SHIFT 8
+
+/* Maximum retries to get rng data */
+#define SA_MAX_RNG_DATA_RETRIES 5
+/* Delay between retries (in usecs) */
+#define SA_RNG_DATA_RETRY_DELAY 5
+
+struct trng_regs {
+ u32 output_l;
+ u32 output_h;
+ u32 status;
+ u32 intmask;
+ u32 intack;
+ u32 control;
+ u32 config;
+};
+
+struct ks_sa_rng {
+ struct device *dev;
+ struct hwrng rng;
+ struct clk *clk;
+ struct regmap *regmap_cfg;
+ struct trng_regs *reg_rng;
+};
+
+static int ks_sa_rng_init(struct hwrng *rng)
+{
+ u32 value;
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+ /* Enable RNG module */
+ regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
+ SA_CMD_STATUS_REG_TRNG_ENABLE,
+ SA_CMD_STATUS_REG_TRNG_ENABLE);
+
+ /* Configure RNG module */
+ writel(0, &ks_sa_rng->reg_rng->control);
+ value = TRNG_DEF_STARTUP_CYCLES << TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT;
+ writel(value, &ks_sa_rng->reg_rng->control);
+
+ value = (TRNG_DEF_MIN_REFILL_CYCLES <<
+ TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT) |
+ (TRNG_DEF_MAX_REFILL_CYCLES <<
+ TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT) |
+ (TRNG_DEF_CLK_DIV_CYCLES <<
+ TRNG_CFG_REG_SAMPLE_DIV_SHIFT);
+
+ writel(value, &ks_sa_rng->reg_rng->config);
+
+ /* Disable all interrupts from TRNG */
+ writel(0, &ks_sa_rng->reg_rng->intmask);
+
+ /* Enable RNG */
+ value = readl(&ks_sa_rng->reg_rng->control);
+ value |= TRNG_CNTL_REG_TRNG_ENABLE;
+ writel(value, &ks_sa_rng->reg_rng->control);
+
+ return 0;
+}
+
+static void ks_sa_rng_cleanup(struct hwrng *rng)
+{
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+ /* Disable RNG */
+ writel(0, &ks_sa_rng->reg_rng->control);
+ regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
+ SA_CMD_STATUS_REG_TRNG_ENABLE, 0);
+}
+
+static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+ /* Read random data */
+ data[0] = readl(&ks_sa_rng->reg_rng->output_l);
+ data[1] = readl(&ks_sa_rng->reg_rng->output_h);
+
+ writel(TRNG_INTACK_REG_READY, &ks_sa_rng->reg_rng->intack);
+
+ return sizeof(u32) * 2;
+}
+
+static int ks_sa_rng_data_present(struct hwrng *rng, int wait)
+{
+ struct device *dev = (struct device *)rng->priv;
+ struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
+
+ u32 ready;
+ int j;
+
+ for (j = 0; j < SA_MAX_RNG_DATA_RETRIES; j++) {
+ ready = readl(&ks_sa_rng->reg_rng->status);
+ ready &= TRNG_STATUS_REG_READY;
+
+ if (ready || !wait)
+ break;
+
+ udelay(SA_RNG_DATA_RETRY_DELAY);
+ }
+
+ return ready;
+}
+
+static int ks_sa_rng_probe(struct platform_device *pdev)
+{
+ struct ks_sa_rng *ks_sa_rng;
+ struct device *dev = &pdev->dev;
+ int ret;
+ struct resource *mem;
+
+ ks_sa_rng = devm_kzalloc(dev, sizeof(*ks_sa_rng), GFP_KERNEL);
+ if (!ks_sa_rng)
+ return -ENOMEM;
+
+ ks_sa_rng->dev = dev;
+ ks_sa_rng->rng = (struct hwrng) {
+ .name = "ks_sa_hwrng",
+ .init = ks_sa_rng_init,
+ .data_read = ks_sa_rng_data_read,
+ .data_present = ks_sa_rng_data_present,
+ .cleanup = ks_sa_rng_cleanup,
+ };
+ ks_sa_rng->rng.priv = (unsigned long)dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ks_sa_rng->reg_rng = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(ks_sa_rng->reg_rng))
+ return PTR_ERR(ks_sa_rng->reg_rng);
+
+ ks_sa_rng->regmap_cfg =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "ti,syscon-sa-cfg");
+
+ if (IS_ERR(ks_sa_rng->regmap_cfg)) {
+ dev_err(dev, "syscon_node_to_regmap failed\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable SA power-domain\n");
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ks_sa_rng);
+
+ return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng);
+}
+
+static int ks_sa_rng_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ks_sa_rng_dt_match[] = {
+ {
+ .compatible = "ti,keystone-rng",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ks_sa_rng_dt_match);
+
+static struct platform_driver ks_sa_rng_driver = {
+ .driver = {
+ .name = "ks-sa-rng",
+ .of_match_table = ks_sa_rng_dt_match,
+ },
+ .probe = ks_sa_rng_probe,
+ .remove = ks_sa_rng_remove,
+};
+
+module_platform_driver(ks_sa_rng_driver);
+
+MODULE_DESCRIPTION("Keystone NETCP SA H/W Random Number Generator driver");
+MODULE_AUTHOR("Vitaly Andrianov <vitalya@ti.com>");
+MODULE_LICENSE("GPL");
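
Once this driver binds and devm_hwrng_register() succeeds, the hwrng core exposes the generator through the usual /dev/hwrng character device (the same interface rngd reads, per the Kconfig help earlier in this patch). A minimal userspace sketch for pulling a few bytes from it — illustrative only, not part of this patch:

/* Illustrative only: read 16 bytes from the hwrng core's char device. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return EXIT_FAILURE;
	}

	n = read(fd, buf, sizeof(buf));	/* blocks until the TRNG has data */
	if (n < 0) {
		perror("read");
		close(fd);
		return EXIT_FAILURE;
	}

	for (ssize_t i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");

	close(fd);
	return EXIT_SUCCESS;
}
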
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 467362262651..f83bee513d91 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -16,16 +16,13 @@
* This driver is based on other RNG drivers.
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
#include <linux/delay.h>
+#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
/* RNGA Registers */
#define RNGA_CONTROL 0x00
@@ -197,10 +194,18 @@ static int __exit mxc_rnga_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id mxc_rnga_of_match[] = {
+ { .compatible = "fsl,imx21-rnga", },
+ { .compatible = "fsl,imx31-rnga", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mxc_rnga_of_match);
+
static struct platform_driver mxc_rnga_driver = {
.driver = {
- .name = "mxc_rnga",
- },
+ .name = "mxc_rnga",
+ .of_match_table = mxc_rnga_of_match,
+ },
.remove = __exit_p(mxc_rnga_remove),
};
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 74d11ae6abe9..b65ff6962899 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -150,6 +150,7 @@ struct omap_rng_dev {
const struct omap_rng_pdata *pdata;
struct hwrng rng;
struct clk *clk;
+ struct clk *clk_reg;
};
static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
@@ -480,6 +481,19 @@ static int omap_rng_probe(struct platform_device *pdev)
}
}
+ priv->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ if (IS_ERR(priv->clk_reg) && PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(priv->clk_reg)) {
+ ret = clk_prepare_enable(priv->clk_reg);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to enable the register clk: %d\n",
+ ret);
+ goto err_register;
+ }
+ }
+
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
get_omap_rng_device_details(priv);
if (ret)
@@ -499,8 +513,8 @@ err_register:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_reg);
+ clk_disable_unprepare(priv->clk);
err_ioremap:
dev_err(dev, "initialization failed.\n");
return ret;
@@ -517,8 +531,8 @@ static int omap_rng_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_reg);
return 0;
}
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 63d84e6f1891..0d2328da3b76 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -16,15 +16,18 @@
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#define RNG_CR 0x00
#define RNG_CR_RNGEN BIT(2)
+#define RNG_CR_CED BIT(5)
#define RNG_SR 0x04
#define RNG_SR_SEIS BIT(6)
@@ -33,19 +36,12 @@
#define RNG_DR 0x08
-/*
- * It takes 40 cycles @ 48MHz to generate each random number (e.g. <1us).
- * At the time of writing STM32 parts max out at ~200MHz meaning a timeout
- * of 500 leaves us a very comfortable margin for error. The loop to which
- * the timeout applies takes at least 4 instructions per iteration so the
- * timeout is enough to take us up to multi-GHz parts!
- */
-#define RNG_TIMEOUT 500
-
struct stm32_rng_private {
struct hwrng rng;
void __iomem *base;
struct clk *clk;
+ struct reset_control *rst;
+ bool ced;
};
static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
@@ -59,13 +55,16 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
while (max > sizeof(u32)) {
sr = readl_relaxed(priv->base + RNG_SR);
+		/* Manage the timeout, which is timer-based, and take */
+		/* care of the initial delay time when enabling the RNG */
if (!sr && wait) {
- unsigned int timeout = RNG_TIMEOUT;
-
- do {
- cpu_relax();
- sr = readl_relaxed(priv->base + RNG_SR);
- } while (!sr && --timeout);
+ retval = readl_relaxed_poll_timeout_atomic(priv->base
+ + RNG_SR,
+ sr, sr,
+ 10, 50000);
+ if (retval)
+ dev_err((struct device *)priv->rng.priv,
+ "%s: timeout %x!\n", __func__, sr);
}
/* If error detected or data not ready... */
@@ -99,7 +98,11 @@ static int stm32_rng_init(struct hwrng *rng)
if (err)
return err;
- writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR);
+ if (priv->ced)
+ writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR);
+ else
+ writel_relaxed(RNG_CR_RNGEN | RNG_CR_CED,
+ priv->base + RNG_CR);
/* clear error indicators */
writel_relaxed(0, priv->base + RNG_SR);
@@ -140,6 +143,15 @@ static int stm32_rng_probe(struct platform_device *ofdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
+ priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
+ if (!IS_ERR(priv->rst)) {
+ reset_control_assert(priv->rst);
+ udelay(2);
+ reset_control_deassert(priv->rst);
+ }
+
+ priv->ced = of_property_read_bool(np, "clock-error-detect");
+
dev_set_drvdata(dev, priv);
priv->rng.name = dev_driver_string(dev),
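
The stm32-rng change above replaces the open-coded RNG_TIMEOUT busy-wait with readl_relaxed_poll_timeout_atomic(), which re-reads RNG_SR every 10 µs until a status bit is set or 50 ms elapse. As a rough sketch of that poll-with-timeout pattern — plain C with a stubbed register read, not the kernel macro itself:

/*
 * Simplified poll-with-timeout pattern, analogous to
 * readl_relaxed_poll_timeout_atomic(base + RNG_SR, sr, sr, 10, 50000)
 * in the hunk above. fake_rng_sr() stands in for the MMIO read.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static long elapsed_us(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000000L +
	       (now.tv_nsec - start->tv_nsec) / 1000L;
}

static int poll_timeout(uint32_t (*read_reg)(void), uint32_t *val,
			long delay_us, long timeout_us)
{
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		*val = read_reg();
		if (*val)		/* condition met: a status bit is set */
			return 0;
		if (elapsed_us(&start) >= timeout_us)
			return -ETIMEDOUT;	/* caller logs the timeout, as the driver does */
		nanosleep(&(struct timespec){ .tv_nsec = delay_us * 1000L },
			  NULL);
	}
}

/* Stubbed "register": reports data ready after a few polls. */
static uint32_t fake_rng_sr(void)
{
	static int polls;

	return ++polls > 3 ? 0x1 : 0x0;	/* bit 0 ~ data ready */
}

int main(void)
{
	uint32_t sr;
	int ret = poll_timeout(fake_rng_sr, &sr, 10, 50000);

	printf("ret=%d sr=0x%x\n", ret, sr);
	return 0;
}
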
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4b741b83e23f..d1ea1a07cecb 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -464,13 +464,6 @@ if CRYPTO_DEV_UX500
source "drivers/crypto/ux500/Kconfig"
endif # if CRYPTO_DEV_UX500
-config CRYPTO_DEV_BFIN_CRC
- tristate "Support for Blackfin CRC hardware"
- depends on BF60x
- help
- Newer Blackfin processors have CRC hardware. Select this if you
- want to use the Blackfin CRC module.
-
config CRYPTO_DEV_ATMEL_AUTHENC
tristate "Support for Atmel IPSEC/SSL hw accelerator"
depends on HAS_DMA
@@ -730,4 +723,31 @@ config CRYPTO_DEV_ARTPEC6
To compile this driver as a module, choose M here.
+config CRYPTO_DEV_CCREE
+ tristate "Support for ARM TrustZone CryptoCell family of security processors"
+ depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
+ default n
+ select CRYPTO_HASH
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_HMAC
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_CTR
+ select CRYPTO_XTS
+ help
+ Say 'Y' to enable a driver for the REE interface of the Arm
+ TrustZone CryptoCell family of processors. Currently the
+ CryptoCell 712, 710 and 630 are supported.
+ Choose this if you wish to use hardware acceleration of
+ cryptographic operations on the system REE.
+ If unsure say Y.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 2513d13ea2c4..7ae87b4f6c8d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -3,9 +3,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
-obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
+obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 691c6465b71e..801aeab5ab1e 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2155,7 +2155,7 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
badkey:
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- memzero_explicit(&key, sizeof(keys));
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -2602,16 +2602,13 @@ static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pd
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ if (!pdata)
return ERR_PTR(-ENOMEM);
- }
pdata->dma_slave = devm_kzalloc(&pdev->dev,
sizeof(*(pdata->dma_slave)),
GFP_KERNEL);
if (!pdata->dma_slave) {
- dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
devm_kfree(&pdev->dev, pdata);
return ERR_PTR(-ENOMEM);
}
@@ -2649,7 +2646,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
if (aes_dd == NULL) {
- dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
goto aes_dd_err;
}
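
The memzero_explicit() change in the first atmel-aes hunk above fixes which object gets wiped: the old code passed &key — the address of the const u8 *key parameter — with sizeof(keys), so it zeroed the wrong memory and left the parsed on-stack key copy intact. A minimal userspace sketch of the intended pattern, using explicit_bzero() (glibc >= 2.25 / BSD) as a stand-in for the kernel's memzero_explicit(); the struct below is hypothetical, not the kernel's crypto_authenc_keys:

#define _DEFAULT_SOURCE
#include <string.h>

/* Hypothetical stand-in for the parsed-key struct used in a setkey path. */
struct parsed_keys {
	unsigned char enckey[32];
	unsigned int enckeylen;
};

static void use_keys(const struct parsed_keys *keys)
{
	(void)keys;	/* program the hardware, build descriptors, ... */
}

static int setkey_example(const unsigned char *key, unsigned int keylen)
{
	struct parsed_keys keys = { 0 };

	if (keylen > sizeof(keys.enckey))
		return -1;

	memcpy(keys.enckey, key, keylen);
	keys.enckeylen = keylen;
	use_keys(&keys);

	/*
	 * Wipe the stack copy itself. The pre-fix code effectively did
	 * explicit_bzero(&key, sizeof(keys)) — taking the address of the
	 * 'key' pointer argument instead of the 'keys' struct — which is
	 * why the address and the sizeof() must refer to the same object.
	 */
	explicit_bzero(&keys, sizeof(keys));
	return 0;
}

int main(void)
{
	static const unsigned char sample[16] = { 0xde, 0xad, 0xbe, 0xef };

	return setkey_example(sample, sizeof(sample));
}
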
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8874aa5ca0f7..4d43081120db 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2726,18 +2726,14 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pd
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ if (!pdata)
return ERR_PTR(-ENOMEM);
- }
pdata->dma_slave = devm_kzalloc(&pdev->dev,
sizeof(*(pdata->dma_slave)),
GFP_KERNEL);
- if (!pdata->dma_slave) {
- dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ if (!pdata->dma_slave)
return ERR_PTR(-ENOMEM);
- }
return pdata;
}
@@ -2758,7 +2754,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
if (sha_dd == NULL) {
- dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
goto sha_dd_err;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 592124f8382b..97b0423efa7f 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1312,18 +1312,14 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *p
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+ if (!pdata)
return ERR_PTR(-ENOMEM);
- }
pdata->dma_slave = devm_kzalloc(&pdev->dev,
sizeof(*(pdata->dma_slave)),
GFP_KERNEL);
- if (!pdata->dma_slave) {
- dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
+ if (!pdata->dma_slave)
return ERR_PTR(-ENOMEM);
- }
return pdata;
}
@@ -1344,7 +1340,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
if (tdes_dd == NULL) {
- dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
goto tdes_dd_err;
}
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 2b75f95bbe1b..309c67c7012f 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -818,7 +818,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
/* AES hashing keeps key size in type field, so need to copy it here */
if (hash_parms.alg == HASH_ALG_AES)
- hash_parms.type = cipher_parms.type;
+ hash_parms.type = (enum hash_type)cipher_parms.type;
else
hash_parms.type = spu->spu_hash_type(rctx->total_sent);
@@ -1409,7 +1409,7 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
rctx->iv_ctr_len);
if (ctx->auth.alg == HASH_ALG_AES)
- hash_parms.type = ctx->cipher_type;
+ hash_parms.type = (enum hash_type)ctx->cipher_type;
/* General case AAD padding (CCM and RFC4543 special cases below) */
aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index d543c010ccd9..a912c6ad3e85 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -279,7 +279,6 @@ int do_shash(unsigned char *name, unsigned char *result,
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
rc = -ENOMEM;
- pr_err("%s: Memory allocation failure\n", __func__);
goto do_shash_err;
}
sdesc->shash.tfm = hash;
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
deleted file mode 100644
index bfbf8bf77f03..000000000000
--- a/drivers/crypto/bfin_crc.c
+++ /dev/null
@@ -1,743 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support Blackfin CRC HW acceleration.
- *
- * Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/err.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/crypto.h>
-#include <linux/cryptohash.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-#include <crypto/hash.h>
-#include <crypto/internal/hash.h>
-#include <asm/unaligned.h>
-
-#include <asm/dma.h>
-#include <asm/portmux.h>
-#include <asm/io.h>
-
-#include "bfin_crc.h"
-
-#define CRC_CCRYPTO_QUEUE_LENGTH 5
-
-#define DRIVER_NAME "bfin-hmac-crc"
-#define CHKSUM_DIGEST_SIZE 4
-#define CHKSUM_BLOCK_SIZE 1
-
-#define CRC_MAX_DMA_DESC 100
-
-#define CRC_CRYPTO_STATE_UPDATE 1
-#define CRC_CRYPTO_STATE_FINALUPDATE 2
-#define CRC_CRYPTO_STATE_FINISH 3
-
-struct bfin_crypto_crc {
- struct list_head list;
- struct device *dev;
- spinlock_t lock;
-
- int irq;
- int dma_ch;
- u32 poly;
- struct crc_register *regs;
-
- struct ahash_request *req; /* current request in operation */
- struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */
- dma_addr_t sg_dma; /* phy addr of sg dma descriptors */
- u8 *sg_mid_buf;
- dma_addr_t sg_mid_dma; /* phy addr of sg mid buffer */
-
- struct tasklet_struct done_task;
- struct crypto_queue queue; /* waiting requests */
-
- u8 busy:1; /* crc device in operation flag */
-};
-
-static struct bfin_crypto_crc_list {
- struct list_head dev_list;
- spinlock_t lock;
-} crc_list;
-
-struct bfin_crypto_crc_reqctx {
- struct bfin_crypto_crc *crc;
-
- unsigned int total; /* total request bytes */
- size_t sg_buflen; /* bytes for this update */
- unsigned int sg_nents;
- struct scatterlist *sg; /* sg list head for this update*/
- struct scatterlist bufsl[2]; /* chained sg list */
-
- size_t bufnext_len;
- size_t buflast_len;
- u8 bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next udpate */
- u8 buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last udpate */
-
- u8 flag;
-};
-
-struct bfin_crypto_crc_ctx {
- struct bfin_crypto_crc *crc;
- u32 key;
-};
-
-/*
- * get element in scatter list by given index
- */
-static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
- unsigned int index)
-{
- struct scatterlist *sg = NULL;
- int i;
-
- for_each_sg(sg_list, sg, nents, i)
- if (i == index)
- break;
-
- return sg;
-}
-
-static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
-{
- writel(0, &crc->regs->datacntrld);
- writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
- writel(key, &crc->regs->curresult);
-
- /* setup CRC interrupts */
- writel(CMPERRI | DCNTEXPI, &crc->regs->status);
- writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);
-
- return 0;
-}
-
-static int bfin_crypto_crc_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
- struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
- struct bfin_crypto_crc *crc;
-
- dev_dbg(ctx->crc->dev, "crc_init\n");
- spin_lock_bh(&crc_list.lock);
- list_for_each_entry(crc, &crc_list.dev_list, list) {
- crc_ctx->crc = crc;
- break;
- }
- spin_unlock_bh(&crc_list.lock);
-
- if (sg_nents(req->src) > CRC_MAX_DMA_DESC) {
- dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
- CRC_MAX_DMA_DESC);
- return -EINVAL;
- }
-
- ctx->crc = crc;
- ctx->bufnext_len = 0;
- ctx->buflast_len = 0;
- ctx->sg_buflen = 0;
- ctx->total = 0;
- ctx->flag = 0;
-
- /* init crc results */
- put_unaligned_le32(crc_ctx->key, req->result);
-
- dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
- crypto_ahash_digestsize(tfm));
-
- return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
-}
-
-static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
-{
- struct scatterlist *sg;
- struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
- int i = 0, j = 0;
- unsigned long dma_config;
- unsigned int dma_count;
- unsigned int dma_addr;
- unsigned int mid_dma_count = 0;
- int dma_mod;
-
- dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);
-
- for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
- dma_addr = sg_dma_address(sg);
- /* deduce extra bytes in last sg */
- if (sg_is_last(sg))
- dma_count = sg_dma_len(sg) - ctx->bufnext_len;
- else
- dma_count = sg_dma_len(sg);
-
- if (mid_dma_count) {
- /* Append last middle dma buffer to 4 bytes with first
- bytes in current sg buffer. Move addr of current
- sg and deduce the length of current sg.
- */
- memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count,
- sg_virt(sg),
- CHKSUM_DIGEST_SIZE - mid_dma_count);
- dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
- dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
-
- dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
- DMAEN | PSIZE_32 | WDSIZE_32;
-
- /* setup new dma descriptor for next middle dma */
- crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
- crc->sg_cpu[i].cfg = dma_config;
- crc->sg_cpu[i].x_count = 1;
- crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
- dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
- "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
- i, crc->sg_cpu[i].start_addr,
- crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
- crc->sg_cpu[i].x_modify);
- i++;
- }
-
- dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
- /* chop current sg dma len to multiple of 32 bits */
- mid_dma_count = dma_count % 4;
- dma_count &= ~0x3;
-
- if (dma_addr % 4 == 0) {
- dma_config |= WDSIZE_32;
- dma_count >>= 2;
- dma_mod = 4;
- } else if (dma_addr % 2 == 0) {
- dma_config |= WDSIZE_16;
- dma_count >>= 1;
- dma_mod = 2;
- } else {
- dma_config |= WDSIZE_8;
- dma_mod = 1;
- }
-
- crc->sg_cpu[i].start_addr = dma_addr;
- crc->sg_cpu[i].cfg = dma_config;
- crc->sg_cpu[i].x_count = dma_count;
- crc->sg_cpu[i].x_modify = dma_mod;
- dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
- "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
- i, crc->sg_cpu[i].start_addr,
- crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
- crc->sg_cpu[i].x_modify);
- i++;
-
- if (mid_dma_count) {
- /* copy extra bytes to next middle dma buffer */
- memcpy(crc->sg_mid_buf + (i << 2),
- (u8*)sg_virt(sg) + (dma_count << 2),
- mid_dma_count);
- }
- }
-
- dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
- /* For final update req, append the buffer for next update as well*/
- if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
- ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
- crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
- CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
- crc->sg_cpu[i].cfg = dma_config;
- crc->sg_cpu[i].x_count = 1;
- crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
- dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
- "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
- i, crc->sg_cpu[i].start_addr,
- crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
- crc->sg_cpu[i].x_modify);
- i++;
- }
-
- if (i == 0)
- return;
-
- /* Set the last descriptor to stop mode */
- crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
- crc->sg_cpu[i - 1].cfg |= DI_EN;
- set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
- set_dma_x_count(crc->dma_ch, 0);
- set_dma_x_modify(crc->dma_ch, 0);
- set_dma_config(crc->dma_ch, dma_config);
-}
-
-static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
- struct ahash_request *req)
-{
- struct crypto_async_request *async_req, *backlog;
- struct bfin_crypto_crc_reqctx *ctx;
- struct scatterlist *sg;
- int ret = 0;
- int nsg, i, j;
- unsigned int nextlen;
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&crc->lock, flags);
- if (req)
- ret = ahash_enqueue_request(&crc->queue, req);
- if (crc->busy) {
- spin_unlock_irqrestore(&crc->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&crc->queue);
- async_req = crypto_dequeue_request(&crc->queue);
- if (async_req)
- crc->busy = 1;
- spin_unlock_irqrestore(&crc->lock, flags);
-
- if (!async_req)
- return ret;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ahash_request_cast(async_req);
- crc->req = req;
- ctx = ahash_request_ctx(req);
- ctx->sg = NULL;
- ctx->sg_buflen = 0;
- ctx->sg_nents = 0;
-
- dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
- ctx->flag, req->nbytes);
-
- if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
- if (ctx->bufnext_len == 0) {
- crc->busy = 0;
- return 0;
- }
-
- /* Pack last crc update buffer to 32bit */
- memset(ctx->bufnext + ctx->bufnext_len, 0,
- CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
- } else {
- /* Pack small data which is less than 32bit to buffer for next update. */
- if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
- memcpy(ctx->bufnext + ctx->bufnext_len,
- sg_virt(req->src), req->nbytes);
- ctx->bufnext_len += req->nbytes;
- if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
- ctx->bufnext_len) {
- goto finish_update;
- } else {
- crc->busy = 0;
- return 0;
- }
- }
-
- if (ctx->bufnext_len) {
- /* Chain in extra bytes of last update */
- ctx->buflast_len = ctx->bufnext_len;
- memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);
-
- nsg = ctx->sg_buflen ? 2 : 1;
- sg_init_table(ctx->bufsl, nsg);
- sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
- if (nsg > 1)
- sg_chain(ctx->bufsl, nsg, req->src);
- ctx->sg = ctx->bufsl;
- } else
- ctx->sg = req->src;
-
- /* Chop crc buffer size to multiple of 32 bit */
- nsg = sg_nents(ctx->sg);
- ctx->sg_nents = nsg;
- ctx->sg_buflen = ctx->buflast_len + req->nbytes;
- ctx->bufnext_len = ctx->sg_buflen % 4;
- ctx->sg_buflen &= ~0x3;
-
- if (ctx->bufnext_len) {
- /* copy extra bytes to buffer for next update */
- memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
- nextlen = ctx->bufnext_len;
- for (i = nsg - 1; i >= 0; i--) {
- sg = sg_get(ctx->sg, nsg, i);
- j = min(nextlen, sg_dma_len(sg));
- memcpy(ctx->bufnext + nextlen - j,
- sg_virt(sg) + sg_dma_len(sg) - j, j);
- if (j == sg_dma_len(sg))
- ctx->sg_nents--;
- nextlen -= j;
- if (nextlen == 0)
- break;
- }
- }
- }
-
-finish_update:
- if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
- ctx->flag == CRC_CRYPTO_STATE_FINISH))
- ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
-
- /* set CRC data count before start DMA */
- writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);
-
- /* setup and enable CRC DMA */
- bfin_crypto_crc_config_dma(crc);
-
- /* finally kick off CRC operation */
- reg = readl(&crc->regs->control);
- writel(reg | BLKEN, &crc->regs->control);
-
- return -EINPROGRESS;
-}
-
-static int bfin_crypto_crc_update(struct ahash_request *req)
-{
- struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
-
- if (!req->nbytes)
- return 0;
-
- dev_dbg(ctx->crc->dev, "crc_update\n");
- ctx->total += req->nbytes;
- ctx->flag = CRC_CRYPTO_STATE_UPDATE;
-
- return bfin_crypto_crc_handle_queue(ctx->crc, req);
-}
-
-static int bfin_crypto_crc_final(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
- struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
-
- dev_dbg(ctx->crc->dev, "crc_final\n");
- ctx->flag = CRC_CRYPTO_STATE_FINISH;
- crc_ctx->key = 0;
-
- return bfin_crypto_crc_handle_queue(ctx->crc, req);
-}
-
-static int bfin_crypto_crc_finup(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
- struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
-
- dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
- ctx->total += req->nbytes;
- ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
- crc_ctx->key = 0;
-
- return bfin_crypto_crc_handle_queue(ctx->crc, req);
-}
-
-static int bfin_crypto_crc_digest(struct ahash_request *req)
-{
- int ret;
-
- ret = bfin_crypto_crc_init(req);
- if (ret)
- return ret;
-
- return bfin_crypto_crc_finup(req);
-}
-
-static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
-
- dev_dbg(crc_ctx->crc->dev, "crc_setkey\n");
- if (keylen != CHKSUM_DIGEST_SIZE) {
- crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- crc_ctx->key = get_unaligned_le32(key);
-
- return 0;
-}
-
-static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
-{
- struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);
-
- crc_ctx->key = 0;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct bfin_crypto_crc_reqctx));
-
- return 0;
-}
-
-static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
-static struct ahash_alg algs = {
- .init = bfin_crypto_crc_init,
- .update = bfin_crypto_crc_update,
- .final = bfin_crypto_crc_final,
- .finup = bfin_crypto_crc_finup,
- .digest = bfin_crypto_crc_digest,
- .setkey = bfin_crypto_crc_setkey,
- .halg.digestsize = CHKSUM_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "hmac(crc32)",
- .cra_driver_name = DRIVER_NAME,
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_init = bfin_crypto_crc_cra_init,
- .cra_exit = bfin_crypto_crc_cra_exit,
- }
-};
-
-static void bfin_crypto_crc_done_task(unsigned long data)
-{
- struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;
-
- bfin_crypto_crc_handle_queue(crc, NULL);
-}
-
-static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
-{
- struct bfin_crypto_crc *crc = dev_id;
- u32 reg;
-
- if (readl(&crc->regs->status) & DCNTEXP) {
- writel(DCNTEXP, &crc->regs->status);
-
- /* prepare results */
- put_unaligned_le32(readl(&crc->regs->result),
- crc->req->result);
-
- reg = readl(&crc->regs->control);
- writel(reg & ~BLKEN, &crc->regs->control);
- crc->busy = 0;
-
- if (crc->req->base.complete)
- crc->req->base.complete(&crc->req->base, 0);
-
- tasklet_schedule(&crc->done_task);
-
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
-}
-
-#ifdef CONFIG_PM
-/**
- * bfin_crypto_crc_suspend - suspend crc device
- * @pdev: device being suspended
- * @state: requested suspend state
- */
-static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
- int i = 100000;
-
- while ((readl(&crc->regs->control) & BLKEN) && --i)
- cpu_relax();
-
- if (i == 0)
- return -EBUSY;
-
- return 0;
-}
-#else
-# define bfin_crypto_crc_suspend NULL
-#endif
-
-#define bfin_crypto_crc_resume NULL
-
-/**
- * bfin_crypto_crc_probe - Initialize module
- *
- */
-static int bfin_crypto_crc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct bfin_crypto_crc *crc;
- unsigned int timeout = 100000;
- int ret;
-
- crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
- if (!crc) {
- dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
- return -ENOMEM;
- }
-
- crc->dev = dev;
-
- INIT_LIST_HEAD(&crc->list);
- spin_lock_init(&crc->lock);
- tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
- crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- crc->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR((void *)crc->regs)) {
- dev_err(&pdev->dev, "Cannot map CRC IO\n");
- return PTR_ERR((void *)crc->regs);
- }
-
- crc->irq = platform_get_irq(pdev, 0);
- if (crc->irq < 0) {
- dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
- return -ENOENT;
- }
-
- ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
- IRQF_SHARED, dev_name(dev), crc);
- if (ret) {
- dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
- return ret;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "No CRC DMA channel specified\n");
- return -ENOENT;
- }
- crc->dma_ch = res->start;
-
- ret = request_dma(crc->dma_ch, dev_name(dev));
- if (ret) {
- dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
- return ret;
- }
-
- crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
- if (crc->sg_cpu == NULL) {
- ret = -ENOMEM;
- goto out_error_dma;
- }
- /*
- * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
- * 1 last + 1 next dma descriptors
- */
- crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
- crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
- * ((CRC_MAX_DMA_DESC + 1) << 1);
-
- writel(0, &crc->regs->control);
- crc->poly = (u32)pdev->dev.platform_data;
- writel(crc->poly, &crc->regs->poly);
-
- while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
- cpu_relax();
-
- if (timeout == 0)
- dev_info(&pdev->dev, "init crc poly timeout\n");
-
- platform_set_drvdata(pdev, crc);
-
- spin_lock(&crc_list.lock);
- list_add(&crc->list, &crc_list.dev_list);
- spin_unlock(&crc_list.lock);
-
- if (list_is_singular(&crc_list.dev_list)) {
- ret = crypto_register_ahash(&algs);
- if (ret) {
- dev_err(&pdev->dev,
- "Can't register crypto ahash device\n");
- goto out_error_dma;
- }
- }
-
- dev_info(&pdev->dev, "initialized\n");
-
- return 0;
-
-out_error_dma:
- if (crc->sg_cpu)
- dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
- free_dma(crc->dma_ch);
-
- return ret;
-}
-
-/**
- * bfin_crypto_crc_remove - Initialize module
- *
- */
-static int bfin_crypto_crc_remove(struct platform_device *pdev)
-{
- struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
-
- if (!crc)
- return -ENODEV;
-
- spin_lock(&crc_list.lock);
- list_del(&crc->list);
- spin_unlock(&crc_list.lock);
-
- crypto_unregister_ahash(&algs);
- tasklet_kill(&crc->done_task);
- free_dma(crc->dma_ch);
-
- return 0;
-}
-
-static struct platform_driver bfin_crypto_crc_driver = {
- .probe = bfin_crypto_crc_probe,
- .remove = bfin_crypto_crc_remove,
- .suspend = bfin_crypto_crc_suspend,
- .resume = bfin_crypto_crc_resume,
- .driver = {
- .name = DRIVER_NAME,
- },
-};
-
-/**
- * bfin_crypto_crc_mod_init - Initialize module
- *
- * Checks the module params and registers the platform driver.
- * Real work is in the platform probe function.
- */
-static int __init bfin_crypto_crc_mod_init(void)
-{
- int ret;
-
- pr_info("Blackfin hardware CRC crypto driver\n");
-
- INIT_LIST_HEAD(&crc_list.dev_list);
- spin_lock_init(&crc_list.lock);
-
- ret = platform_driver_register(&bfin_crypto_crc_driver);
- if (ret) {
- pr_err("unable to register driver\n");
- return ret;
- }
-
- return 0;
-}
-
-/**
- * bfin_crypto_crc_mod_exit - Deinitialize module
- */
-static void __exit bfin_crypto_crc_mod_exit(void)
-{
- platform_driver_unregister(&bfin_crypto_crc_driver);
-}
-
-module_init(bfin_crypto_crc_mod_init);
-module_exit(bfin_crypto_crc_mod_exit);
-
-MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
-MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/bfin_crc.h b/drivers/crypto/bfin_crc.h
deleted file mode 100644
index 786ef746d109..000000000000
--- a/drivers/crypto/bfin_crc.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * bfin_crc.h - interface to Blackfin CRC controllers
- *
- * Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef __BFIN_CRC_H__
-#define __BFIN_CRC_H__
-
-/* Function driver which use hardware crc must initialize the structure */
-struct crc_info {
- /* Input data address */
- unsigned char *in_addr;
- /* Output data address */
- unsigned char *out_addr;
- /* Input or output bytes */
- unsigned long datasize;
- union {
- /* CRC to compare with that of input buffer */
- unsigned long crc_compare;
- /* Value to compare with input data */
- unsigned long val_verify;
- /* Value to fill */
- unsigned long val_fill;
- };
- /* Value to program the 32b CRC Polynomial */
- unsigned long crc_poly;
- union {
- /* CRC calculated from the input data */
- unsigned long crc_result;
- /* First failed position to verify input data */
- unsigned long pos_verify;
- };
- /* CRC mirror flags */
- unsigned int bitmirr:1;
- unsigned int bytmirr:1;
- unsigned int w16swp:1;
- unsigned int fdsel:1;
- unsigned int rsltmirr:1;
- unsigned int polymirr:1;
- unsigned int cmpmirr:1;
-};
-
-/* Userspace interface */
-#define CRC_IOC_MAGIC 'C'
-#define CRC_IOC_CALC_CRC _IOWR('C', 0x01, unsigned int)
-#define CRC_IOC_MEMCPY_CRC _IOWR('C', 0x02, unsigned int)
-#define CRC_IOC_VERIFY_VAL _IOWR('C', 0x03, unsigned int)
-#define CRC_IOC_FILL_VAL _IOWR('C', 0x04, unsigned int)
-
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-
-struct crc_register {
- u32 control;
- u32 datacnt;
- u32 datacntrld;
- u32 __pad_1[2];
- u32 compare;
- u32 fillval;
- u32 datafifo;
- u32 intren;
- u32 intrenset;
- u32 intrenclr;
- u32 poly;
- u32 __pad_2[4];
- u32 status;
- u32 datacntcap;
- u32 __pad_3;
- u32 result;
- u32 curresult;
- u32 __pad_4[3];
- u32 revid;
-};
-
-/* CRC_STATUS Masks */
-#define CMPERR 0x00000002 /* Compare error */
-#define DCNTEXP 0x00000010 /* datacnt register expired */
-#define IBR 0x00010000 /* Input buffer ready */
-#define OBR 0x00020000 /* Output buffer ready */
-#define IRR 0x00040000 /* Immediate result readt */
-#define LUTDONE 0x00080000 /* Look-up table generation done */
-#define FSTAT 0x00700000 /* FIFO status */
-#define MAX_FIFO 4 /* Max fifo size */
-
-/* CRC_CONTROL Masks */
-#define BLKEN 0x00000001 /* Block enable */
-#define OPMODE 0x000000F0 /* Operation mode */
-#define OPMODE_OFFSET 4 /* Operation mode mask offset*/
-#define MODE_DMACPY_CRC 1 /* MTM CRC compute and compare */
-#define MODE_DATA_FILL 2 /* MTM data fill */
-#define MODE_CALC_CRC 3 /* MSM CRC compute and compare */
-#define MODE_DATA_VERIFY 4 /* MSM data verify */
-#define AUTOCLRZ 0x00000100 /* Auto clear to zero */
-#define AUTOCLRF 0x00000200 /* Auto clear to one */
-#define OBRSTALL 0x00001000 /* Stall on output buffer ready */
-#define IRRSTALL 0x00002000 /* Stall on immediate result ready */
-#define BITMIRR 0x00010000 /* Mirror bits within each byte of 32-bit input data */
-#define BITMIRR_OFFSET 16 /* Mirror bits offset */
-#define BYTMIRR 0x00020000 /* Mirror bytes of 32-bit input data */
-#define BYTMIRR_OFFSET 17 /* Mirror bytes offset */
-#define W16SWP 0x00040000 /* Mirror uppper and lower 16-bit word of 32-bit input data */
-#define W16SWP_OFFSET 18 /* Mirror 16-bit word offset */
-#define FDSEL 0x00080000 /* FIFO is written after input data is mirrored */
-#define FDSEL_OFFSET 19 /* Mirror FIFO offset */
-#define RSLTMIRR 0x00100000 /* CRC result registers are mirrored. */
-#define RSLTMIRR_OFFSET 20 /* Mirror CRC result offset. */
-#define POLYMIRR 0x00200000 /* CRC poly register is mirrored. */
-#define POLYMIRR_OFFSET 21 /* Mirror CRC poly offset. */
-#define CMPMIRR 0x00400000 /* CRC compare register is mirrored. */
-#define CMPMIRR_OFFSET 22 /* Mirror CRC compare offset. */
-
-/* CRC_INTREN Masks */
-#define CMPERRI 0x02 /* CRC_ERROR_INTR */
-#define DCNTEXPI 0x10 /* CRC_STATUS_INTR */
-
-#endif
-
-#endif
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2188235be02d..7207a535942d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -328,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -349,7 +350,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_enc;
- cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
@@ -366,7 +367,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_dec;
- cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
@@ -387,6 +388,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -408,7 +410,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_enc;
- cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
@@ -425,7 +428,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_dec;
- cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
@@ -447,6 +451,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
@@ -468,7 +473,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_enc;
- cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
@@ -485,7 +491,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
}
desc = ctx->sh_desc_dec;
- cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
@@ -563,9 +570,11 @@ static int aead_setkey(struct crypto_aead *aead,
skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
+ memzero_explicit(&keys, sizeof(keys));
return aead_set_sh_desc(aead);
badkey:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index ceb93fbb76e6..8ae7a1be7dfd 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -625,10 +625,13 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
*zero_assoc_jump_cmd2;
@@ -650,11 +653,35 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_ENCRYPT);
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
+ ivsize);
+ } else {
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
+ CAAM_CMD_SZ);
+ }
+
/* if assoclen + cryptlen is ZERO, skip to ICV write */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
JUMP_COND_MATH_Z);
+ if (is_qi)
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
/* if assoclen is ZERO, skip reading the assoc data */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
@@ -686,8 +713,11 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
- /* jump the zero-payload commands */
- append_jump(desc, JUMP_TEST_ALL | 2);
+ /* jump to ICV writing */
+ if (is_qi)
+ append_jump(desc, JUMP_TEST_ALL | 4);
+ else
+ append_jump(desc, JUMP_TEST_ALL | 2);
/* zero-payload commands */
set_jump_tgt_here(desc, zero_payload_jump_cmd);
@@ -695,10 +725,18 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
/* read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+ if (is_qi)
+ /* jump to ICV writing */
+ append_jump(desc, JUMP_TEST_ALL | 2);
/* There is no input data */
set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+ if (is_qi)
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+ FIFOLD_TYPE_LAST1);
+
/* write ICV */
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT);
@@ -715,10 +753,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
@@ -739,6 +780,24 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
/* if assoclen is ZERO, skip reading the assoc data */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
@@ -791,10 +850,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd;
@@ -815,7 +877,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_ENCRYPT);
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
/* Read assoc data */
@@ -823,7 +907,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
/* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
/* Will read cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -862,10 +946,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd;
@@ -887,7 +974,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
/* Read assoc data */
@@ -895,7 +1004,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
/* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
/* Will read cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
@@ -934,10 +1043,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
@@ -958,6 +1070,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_ENCRYPT);
+ if (is_qi) {
+ /* assoclen is not needed, skip it */
+ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
/* assoclen + cryptlen = seqinlen */
append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1004,10 +1128,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
*/
void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize)
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
@@ -1028,6 +1155,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (is_qi) {
+ /* assoclen is not needed, skip it */
+ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
/* assoclen + cryptlen = seqoutlen */
append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
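With the extended signatures above, a QI-side caller now supplies the IV size and sets is_qi; a minimal sketch of that call pattern (the context fields are placeholders, mirroring the caamalg_qi glue further down):

	unsigned int ivsize = crypto_aead_ivsize(aead);

	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);
	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);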
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index 5f9445ae2114..a917af5776ce 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -27,14 +27,20 @@
#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
@@ -67,22 +73,28 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
const bool is_qi, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, const bool is_rfc3686,
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 4aecc9435f69..cacda0831390 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -278,12 +278,317 @@ skip_split_key:
}
}
+ memzero_explicit(&keys, sizeof(keys));
return ret;
badkey:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
+
+ ret = gcm_set_sh_desc(aead);
+ if (ret)
+ return ret;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ return ret;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ ret = rfc4106_set_sh_desc(aead);
+ if (ret)
+ return ret;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ return ret;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ ret = rfc4543_set_sh_desc(aead);
+ if (ret)
+ return ret;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ return ret;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
@@ -562,8 +867,18 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status)
qidev = caam_ctx->qidev;
if (unlikely(status)) {
+ u32 ssrc = status & JRSTA_SSRC_MASK;
+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
caam_jr_strstatus(qidev, status);
- ecode = -EIO;
+		/*
+		 * If the hardware ICV (authentication) check failed, report
+		 * -EBADMSG instead of a generic -EIO.
+		 */
+ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
+ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
+ ecode = -EBADMSG;
+ else
+ ecode = -EIO;
}
edesc = container_of(drv_req, typeof(*edesc), drv_req);
@@ -807,6 +1122,22 @@ static int aead_decrypt(struct aead_request *req)
return aead_crypt(req, false);
}
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
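+	/*
+	 * For rfc4106/rfc4543 the AAD carries at least the 8-byte ESP
+	 * header (SPI + sequence number), so a smaller assoclen can only
+	 * come from a malformed request.
+	 */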
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_crypt(req, true);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_crypt(req, false);
+}
+
static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
struct ablkcipher_edesc *edesc;
@@ -1327,6 +1658,61 @@ static struct caam_alg_template driver_algs[] = {
};
static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ }
+ },
/* single-pass ipsec_esp descriptor */
{
.aead = {
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e843cf410373..e4cc636e1104 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -337,7 +337,8 @@ static int caam_remove(struct platform_device *pdev)
/* shut clocks off before finalizing shutdown */
clk_disable_unprepare(ctrlpriv->caam_ipg);
- clk_disable_unprepare(ctrlpriv->caam_mem);
+ if (ctrlpriv->caam_mem)
+ clk_disable_unprepare(ctrlpriv->caam_mem);
clk_disable_unprepare(ctrlpriv->caam_aclk);
if (ctrlpriv->caam_emi_slow)
clk_disable_unprepare(ctrlpriv->caam_emi_slow);
@@ -466,14 +467,17 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->caam_ipg = clk;
- clk = caam_drv_identify_clk(&pdev->dev, "mem");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM mem clk: %d\n", ret);
- return ret;
+ if (!of_machine_is_compatible("fsl,imx7d") &&
+ !of_machine_is_compatible("fsl,imx7s")) {
+ clk = caam_drv_identify_clk(&pdev->dev, "mem");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM mem clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_mem = clk;
}
- ctrlpriv->caam_mem = clk;
clk = caam_drv_identify_clk(&pdev->dev, "aclk");
if (IS_ERR(clk)) {
@@ -484,7 +488,9 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->caam_aclk = clk;
- if (!of_machine_is_compatible("fsl,imx6ul")) {
+ if (!of_machine_is_compatible("fsl,imx6ul") &&
+ !of_machine_is_compatible("fsl,imx7d") &&
+ !of_machine_is_compatible("fsl,imx7s")) {
clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
@@ -501,11 +507,13 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
- ret = clk_prepare_enable(ctrlpriv->caam_mem);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
- ret);
- goto disable_caam_ipg;
+ if (ctrlpriv->caam_mem) {
+ ret = clk_prepare_enable(ctrlpriv->caam_mem);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
+ ret);
+ goto disable_caam_ipg;
+ }
}
ret = clk_prepare_enable(ctrlpriv->caam_aclk);
@@ -815,9 +823,6 @@ static int caam_probe(struct platform_device *pdev)
return 0;
caam_remove:
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
caam_remove(pdev);
return ret;
@@ -829,7 +834,8 @@ disable_caam_emi_slow:
disable_caam_aclk:
clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
- clk_disable_unprepare(ctrlpriv->caam_mem);
+ if (ctrlpriv->caam_mem)
+ clk_disable_unprepare(ctrlpriv->caam_mem);
disable_caam_ipg:
clk_disable_unprepare(ctrlpriv->caam_ipg);
return ret;
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index f9a44f485aac..b9480828da38 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -579,8 +579,15 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
fd = &dqrr->fd;
status = be32_to_cpu(fd->status);
- if (unlikely(status))
- dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
+ if (unlikely(status)) {
+ u32 ssrc = status & JRSTA_SSRC_MASK;
+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+
+ if (ssrc != JRSTA_SSRC_CCB_ERROR ||
+ err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+ dev_err(qidev, "Error: %#x in CAAM response FD\n",
+ status);
+ }
if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
dev_err(qidev, "Non-compound FD from CAAM\n");
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 34a6d8bf229e..06ad85ab5e86 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -436,7 +436,7 @@ static int cpt_device_init(struct cpt_device *cpt)
/* Reset the PF when probed first */
cpt_reset(cpt);
- mdelay(100);
+ msleep(100);
/*Check BIST status*/
bist = (u64)cpt_check_bist_status(cpt);
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 60fc0fa26fd3..26687f318de6 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -46,7 +46,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
}
/* Update result area if supplied */
- if (req->result)
+ if (req->result && rctx->final)
memcpy(req->result, rctx->iv, digest_size);
e_free:
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index e6db8672d89c..05850dfd7940 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -60,10 +60,9 @@ static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
{
- if (ccp_version() > CCP_VERSION(3, 0))
- return CCP5_RSA_MAXMOD;
- else
- return CCP_RSA_MAXMOD;
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ return ctx->u.rsa.n_len;
}
static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8b9b16d433f7..871c9628a2ee 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -47,7 +47,7 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
}
/* Update result area if supplied */
- if (req->result)
+ if (req->result && rctx->final)
memcpy(req->result, rctx->ctx, digest_size);
e_free:
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 59d4ca4e72d8..1a734bd2070a 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -278,7 +278,7 @@ static const struct file_operations ccp_debugfs_stats_ops = {
};
static struct dentry *ccp_debugfs_dir;
-static DEFINE_RWLOCK(ccp_debugfs_lock);
+static DEFINE_MUTEX(ccp_debugfs_lock);
#define MAX_NAME_LEN 20
@@ -290,16 +290,15 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
struct dentry *debugfs_stats;
struct dentry *debugfs_q_instance;
struct dentry *debugfs_q_stats;
- unsigned long flags;
int i;
if (!debugfs_initialized())
return;
- write_lock_irqsave(&ccp_debugfs_lock, flags);
+ mutex_lock(&ccp_debugfs_lock);
if (!ccp_debugfs_dir)
ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- write_unlock_irqrestore(&ccp_debugfs_lock, flags);
+ mutex_unlock(&ccp_debugfs_lock);
if (!ccp_debugfs_dir)
return;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 8b9da58459df..67155cb21636 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -38,7 +38,7 @@ static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
-unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
+static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
switch (dma_chan_attr) {
case CCP_DMA_DFLT:
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 406b95329b3d..0ea43cdeb05f 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -178,14 +178,18 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
return 0;
}
-static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
- struct scatterlist *sg, unsigned int sg_offset,
- unsigned int len)
+static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+ struct scatterlist *sg, unsigned int sg_offset,
+ unsigned int len)
{
WARN_ON(!wa->address);
+ if (len > (wa->length - wa_offset))
+ return -EINVAL;
+
scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
0);
+ return 0;
}
static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
@@ -205,8 +209,11 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
unsigned int len)
{
u8 *p, *q;
+ int rc;
- ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+ rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+ if (rc)
+ return rc;
p = wa->address + wa_offset;
q = p + len - 1;
@@ -509,7 +516,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ if (ret)
+ goto e_key;
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -528,7 +537,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
goto e_key;
dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ if (ret)
+ goto e_ctx;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -556,8 +567,10 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
goto e_src;
}
- ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
- aes->cmac_key_len);
+ ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+ aes->cmac_key_len);
+ if (ret)
+ goto e_src;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -666,7 +679,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ if (ret)
+ goto e_key;
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -685,7 +700,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
goto e_key;
dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ if (ret)
+ goto e_ctx;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -777,7 +794,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
goto e_dst;
}
- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ if (ret)
+ goto e_dst;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -820,7 +839,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
DMA_BIDIRECTIONAL);
if (ret)
goto e_tag;
- ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+ if (ret)
+ goto e_tag;
ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
ccp_dm_free(&tag);
@@ -914,7 +935,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ if (ret)
+ goto e_key;
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -935,7 +958,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (aes->mode != CCP_AES_MODE_ECB) {
/* Load the AES context - convert to LE */
dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ if (ret)
+ goto e_ctx;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -1113,8 +1138,12 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
* big endian to little endian.
*/
dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
- ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
- ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+ ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+ if (ret)
+ goto e_key;
+ ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+ if (ret)
+ goto e_key;
} else {
/* Version 5 CCPs use a 512-bit space for the key: each portion
* occupies 256 bits, or one entire slot, and is zero-padded.
@@ -1123,9 +1152,13 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
dm_offset = CCP_SB_BYTES;
pad = dm_offset - xts->key_len;
- ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
- ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
- xts->key_len);
+ ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+ if (ret)
+ goto e_key;
+ ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
+ xts->key_len, xts->key_len);
+ if (ret)
+ goto e_key;
}
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -1144,7 +1177,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
if (ret)
goto e_key;
- ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+ ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+ if (ret)
+ goto e_ctx;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
@@ -1287,12 +1322,18 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
len_singlekey = des3->key_len / 3;
- ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
- des3->key, 0, len_singlekey);
- ccp_set_dm_area(&key, dm_offset + len_singlekey,
- des3->key, len_singlekey, len_singlekey);
- ccp_set_dm_area(&key, dm_offset,
- des3->key, 2 * len_singlekey, len_singlekey);
+ ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+ des3->key, 0, len_singlekey);
+ if (ret)
+ goto e_key;
+ ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
+ des3->key, len_singlekey, len_singlekey);
+ if (ret)
+ goto e_key;
+ ret = ccp_set_dm_area(&key, dm_offset,
+ des3->key, 2 * len_singlekey, len_singlekey);
+ if (ret)
+ goto e_key;
/* Copy the key to the SB */
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
@@ -1320,7 +1361,10 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Load the context into the LSB */
dm_offset = CCP_SB_BYTES - des3->iv_len;
- ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
+ ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
+ des3->iv_len);
+ if (ret)
+ goto e_ctx;
if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
@@ -1604,8 +1648,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
}
} else {
/* Restore the context */
- ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
- sb_count * CCP_SB_BYTES);
+ ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+ sb_count * CCP_SB_BYTES);
+ if (ret)
+ goto e_ctx;
}
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
@@ -1927,7 +1973,9 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
if (ret)
return ret;
- ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+ ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+ if (ret)
+ goto e_mask;
ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index b3afb6cc9d72..d95ec526587a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -367,8 +367,6 @@ e_free:
void *psp_copy_user_blob(u64 __user uaddr, u32 len)
{
- void *data;
-
if (!uaddr || !len)
return ERR_PTR(-EINVAL);
@@ -376,18 +374,7 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len)
if (len > SEV_FW_BLOB_MAX_SIZE)
return ERR_PTR(-EINVAL);
- data = kmalloc(len, GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- if (copy_from_user(data, (void __user *)(uintptr_t)uaddr, len))
- goto e_free;
-
- return data;
-
-e_free:
- kfree(data);
- return ERR_PTR(-EFAULT);
+ return memdup_user((void __user *)(uintptr_t)uaddr, len);
}
EXPORT_SYMBOL_GPL(psp_copy_user_blob);
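A caller-side sketch (buffer and variable names are illustrative): memdup_user() propagates failures as ERR_PTR values, so callers keep the usual IS_ERR() handling:

	void *blob = psp_copy_user_blob(uaddr, len);

	if (IS_ERR(blob))
		return PTR_ERR(blob);
	/* ... consume blob ... */
	kfree(blob);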
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index eb0da6572720..e0459002eb71 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -252,12 +252,12 @@ struct sp_device *sp_get_psp_master_device(void)
goto unlock;
list_for_each_entry(i, &sp_units, entry) {
- if (i->psp_data)
+ if (i->psp_data && i->get_psp_master_device) {
+ ret = i->get_psp_master_device();
break;
+ }
}
- if (i->get_psp_master_device)
- ret = i->get_psp_master_device();
unlock:
write_unlock_irqrestore(&sp_unit_lock, flags);
return ret;
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
new file mode 100644
index 000000000000..bdc27970f95f
--- /dev/null
+++ b/drivers/crypto/ccree/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
+ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
+ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
+ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
new file mode 100644
index 000000000000..03f4b9fce556
--- /dev/null
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -0,0 +1,2718 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <linux/rtnetlink.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_aead.h"
+#include "cc_request_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
+
+#define template_aead template_u.aead
+
+#define MAX_AEAD_SETKEY_SEQ 12
+#define MAX_AEAD_PROCESS_SEQ 23
+
+#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
+#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
+
+#define AES_CCM_RFC4309_NONCE_SIZE 3
+#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
+
+/* Value of each ICV_CMP byte (of 8) in case of success */
+#define ICV_VERIF_OK 0x01
+
+struct cc_aead_handle {
+ cc_sram_addr_t sram_workspace_addr;
+ struct list_head aead_list;
+};
+
+struct cc_hmac_s {
+ u8 *padded_authkey;
+	u8 *ipad_opad; /* IPAD, OPAD */
+ dma_addr_t padded_authkey_dma_addr;
+ dma_addr_t ipad_opad_dma_addr;
+};
+
+struct cc_xcbc_s {
+ u8 *xcbc_keys; /* K1,K2,K3 */
+ dma_addr_t xcbc_keys_dma_addr;
+};
+
+struct cc_aead_ctx {
+ struct cc_drvdata *drvdata;
+ u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
+ u8 *enckey;
+ dma_addr_t enckey_dma_addr;
+ union {
+ struct cc_hmac_s hmac;
+ struct cc_xcbc_s xcbc;
+ } auth_state;
+ unsigned int enc_keylen;
+ unsigned int auth_keylen;
+	unsigned int authsize; /* Actual (possibly truncated) size of the MAC/ICV */
+ enum drv_cipher_mode cipher_mode;
+ enum cc_flow_mode flow_mode;
+ enum drv_hash_mode auth_mode;
+};
+
+static inline bool valid_assoclen(struct aead_request *req)
+{
+ return ((req->assoclen == 16) || (req->assoclen == 20));
+}
+
+static void cc_aead_exit(struct crypto_aead *tfm)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
+ crypto_tfm_alg_name(&tfm->base));
+
+ /* Unmap enckey buffer */
+ if (ctx->enckey) {
+ dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
+ ctx->enckey_dma_addr);
+ dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
+ &ctx->enckey_dma_addr);
+ ctx->enckey_dma_addr = 0;
+ ctx->enckey = NULL;
+ }
+
+	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+
+ if (xcbc->xcbc_keys) {
+ dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
+ xcbc->xcbc_keys,
+ xcbc->xcbc_keys_dma_addr);
+ }
+ dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
+ &xcbc->xcbc_keys_dma_addr);
+ xcbc->xcbc_keys_dma_addr = 0;
+ xcbc->xcbc_keys = NULL;
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+ if (hmac->ipad_opad) {
+ dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
+ hmac->ipad_opad,
+ hmac->ipad_opad_dma_addr);
+ dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
+ &hmac->ipad_opad_dma_addr);
+ hmac->ipad_opad_dma_addr = 0;
+ hmac->ipad_opad = NULL;
+ }
+ if (hmac->padded_authkey) {
+ dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
+ hmac->padded_authkey,
+ hmac->padded_authkey_dma_addr);
+ dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
+ &hmac->padded_authkey_dma_addr);
+ hmac->padded_authkey_dma_addr = 0;
+ hmac->padded_authkey = NULL;
+ }
+ }
+}
+
+static int cc_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg, aead_alg);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
+ crypto_tfm_alg_name(&tfm->base));
+
+ /* Initialize modes in instance */
+ ctx->cipher_mode = cc_alg->cipher_mode;
+ ctx->flow_mode = cc_alg->flow_mode;
+ ctx->auth_mode = cc_alg->auth_mode;
+ ctx->drvdata = cc_alg->drvdata;
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
+
+ /* Allocate key buffer, cache line aligned */
+ ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
+ &ctx->enckey_dma_addr, GFP_KERNEL);
+ if (!ctx->enckey) {
+ dev_err(dev, "Failed allocating key buffer\n");
+ goto init_failed;
+ }
+ dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
+ ctx->enckey);
+
+ /* Set default authlen value */
+
+	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
+ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
+ const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
+
+ /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
+		/* (also used temporarily for the user key - up to 256 bits) */
+ xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
+ &xcbc->xcbc_keys_dma_addr,
+ GFP_KERNEL);
+ if (!xcbc->xcbc_keys) {
+ dev_err(dev, "Failed allocating buffer for XCBC keys\n");
+ goto init_failed;
+ }
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+ const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
+ dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
+
+ /* Allocate dma-coherent buffer for IPAD + OPAD */
+ hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
+ &hmac->ipad_opad_dma_addr,
+ GFP_KERNEL);
+
+ if (!hmac->ipad_opad) {
+ dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
+ goto init_failed;
+ }
+
+ dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
+ hmac->ipad_opad);
+
+ hmac->padded_authkey = dma_alloc_coherent(dev,
+ MAX_HMAC_BLOCK_SIZE,
+ pkey_dma,
+ GFP_KERNEL);
+
+ if (!hmac->padded_authkey) {
+ dev_err(dev, "failed to allocate padded_authkey\n");
+ goto init_failed;
+ }
+ } else {
+ ctx->auth_state.hmac.ipad_opad = NULL;
+ ctx->auth_state.hmac.padded_authkey = NULL;
+ }
+
+ return 0;
+
+init_failed:
+ cc_aead_exit(tfm);
+ return -ENOMEM;
+}
+
+static void cc_aead_complete(struct device *dev, void *cc_req, int err)
+{
+ struct aead_request *areq = (struct aead_request *)cc_req;
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ cc_unmap_aead_request(dev, areq);
+
+ /* Restore ordinary iv pointer */
+ areq->iv = areq_ctx->backup_iv;
+
+ if (err)
+ goto done;
+
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
+ ctx->authsize) != 0) {
+ dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
+ ctx->authsize, ctx->cipher_mode);
+			/* In case of payload authentication failure, the
+			 * decrypted message must not be revealed --> zero its
+			 * memory.
+			 */
+ cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
+ err = -EBADMSG;
+ }
+ } else { /*ENCRYPT*/
+ if (areq_ctx->is_icv_fragmented) {
+ u32 skip = areq->cryptlen + areq_ctx->dst_offset;
+
+ cc_copy_sg_portion(dev, areq_ctx->mac_buf,
+ areq_ctx->dst_sgl, skip,
+ (skip + ctx->authsize),
+ CC_SG_FROM_BUF);
+ }
+
+ /* If an IV was generated, copy it back to the user provided
+ * buffer.
+ */
+ if (areq_ctx->backup_giv) {
+ if (ctx->cipher_mode == DRV_CIPHER_CTR)
+ memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+ CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_IV_SIZE);
+ else if (ctx->cipher_mode == DRV_CIPHER_CCM)
+ memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
+ CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
+ }
+ }
+done:
+ aead_request_complete(areq, err);
+}
+
+static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
+ struct cc_aead_ctx *ctx)
+{
+ /* Load the AES key */
+ hw_desc_init(&desc[0]);
+	/* The source/user key reuses the same buffer as the output keys,
+	 * because it is not needed anymore once this key loading is done.
+	 */
+ set_din_type(&desc[0], DMA_DLLI,
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
+ NS_BIT);
+ set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_key_size_aes(&desc[0], ctx->auth_keylen);
+ set_flow_mode(&desc[0], S_DIN_to_AES);
+ set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
+
+ hw_desc_init(&desc[1]);
+ set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[1], DIN_AES_DOUT);
+ set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ hw_desc_init(&desc[2]);
+ set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[2], DIN_AES_DOUT);
+ set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+ + AES_KEYSIZE_128),
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ hw_desc_init(&desc[3]);
+ set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[3], DIN_AES_DOUT);
+ set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+ + 2 * AES_KEYSIZE_128),
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ return 4;
+}
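As a reading aid, here is a minimal software sketch of the K1/K2/K3 derivation that the four descriptors above program into the engine (RFC 3566 style: encrypt the constants 0x01.., 0x02.., 0x03.. under the user key). It uses the generic single-block cipher API with illustrative names; it is not code from this driver:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/string.h>
#include <crypto/aes.h>

/* Derive the three XCBC subkeys in software; 'out' receives K1 || K2 || K3. */
static int xcbc_derive_subkeys(const u8 *key, unsigned int keylen,
			       u8 out[3 * AES_BLOCK_SIZE])
{
	struct crypto_cipher *tfm;
	u8 c[AES_BLOCK_SIZE];
	int i, err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	for (i = 1; !err && i <= 3; i++) {
		/* K_i = E_key(i i i ... i), one AES block of the constant */
		memset(c, i, sizeof(c));
		crypto_cipher_encrypt_one(tfm, out + (i - 1) * AES_BLOCK_SIZE, c);
	}

	crypto_free_cipher(tfm);
	return err;
}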
+
+static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
+{
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ unsigned int digest_ofs = 0;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
+
+ unsigned int idx = 0;
+ int i;
+
+ /* calc derived HMAC key */
+ for (i = 0; i < 2; i++) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_sram(&desc[idx],
+ cc_larval_digest_addr(ctx->drvdata,
+ ctx->auth_mode),
+ digest_size);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+		/* Load the current hash length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+		/* Prepare ipad/opad key */
+ hw_desc_init(&desc[idx]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ hmac->padded_authkey_dma_addr,
+ SHA256_BLOCK_SIZE, NS_BIT);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_xor_active(&desc[idx]);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+		/* Get the digest */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_dout_dlli(&desc[idx],
+ (hmac->ipad_opad_dma_addr + digest_ofs),
+ digest_size, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ idx++;
+
+ digest_ofs += digest_size;
+ }
+
+ return idx;
+}
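In plain terms, the loop above performs the standard HMAC precomputation; an annotation of what ends up at ipad_opad_dma_addr (notation only, not driver code):

	/*
	 * For each pad constant in { HMAC_IPAD_CONST, HMAC_OPAD_CONST }:
	 *   state = hash_compress(initial_state, padded_authkey XOR pad_const)
	 * i.e. the digest state after absorbing one block of K ^ ipad and of
	 * K ^ opad, stored back-to-back at ipad_opad_dma_addr, so later
	 * descriptors can resume HMAC from these states instead of
	 * re-hashing the key for every request.
	 */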
+
+static int validate_keys_sizes(struct cc_aead_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
+ ctx->enc_keylen, ctx->auth_keylen);
+
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ break;
+ case DRV_HASH_XCBC_MAC:
+ if (ctx->auth_keylen != AES_KEYSIZE_128 &&
+ ctx->auth_keylen != AES_KEYSIZE_192 &&
+ ctx->auth_keylen != AES_KEYSIZE_256)
+ return -ENOTSUPP;
+ break;
+	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
+ if (ctx->auth_keylen > 0)
+ return -EINVAL;
+ break;
+ default:
+ dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
+ return -EINVAL;
+ }
+ /* Check cipher key size */
+ if (ctx->flow_mode == S_DIN_to_DES) {
+ if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
+ dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+ ctx->enc_keylen);
+ return -EINVAL;
+ }
+ } else { /* Default assumed to be AES ciphers */
+ if (ctx->enc_keylen != AES_KEYSIZE_128 &&
+ ctx->enc_keylen != AES_KEYSIZE_192 &&
+ ctx->enc_keylen != AES_KEYSIZE_256) {
+ dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+ ctx->enc_keylen);
+ return -EINVAL;
+ }
+ }
+
+	return 0; /* All key size checks passed */
+}
+
+/* This function prepares the user key so it can be passed to the HMAC
+ * processing (copy to internal buffer, or hash it if the key is longer
+ * than the block size).
+ */
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ dma_addr_t key_dma_addr = 0;
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
+ struct cc_crypto_req cc_req = {};
+ unsigned int blocksize;
+ unsigned int digestsize;
+ unsigned int hashmode;
+ unsigned int idx = 0;
+ int rc = 0;
+ struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
+ dma_addr_t padded_authkey_dma_addr =
+ ctx->auth_state.hmac.padded_authkey_dma_addr;
+
+ switch (ctx->auth_mode) { /* auth_key required and >0 */
+ case DRV_HASH_SHA1:
+ blocksize = SHA1_BLOCK_SIZE;
+ digestsize = SHA1_DIGEST_SIZE;
+ hashmode = DRV_HASH_HW_SHA1;
+ break;
+ case DRV_HASH_SHA256:
+ default:
+ blocksize = SHA256_BLOCK_SIZE;
+ digestsize = SHA256_DIGEST_SIZE;
+ hashmode = DRV_HASH_HW_SHA256;
+ }
+
+ if (keylen != 0) {
+ key_dma_addr = dma_map_single(dev, (void *)key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
+ return -ENOMEM;
+ }
+ if (keylen > blocksize) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hashmode);
+ set_din_sram(&desc[idx], larval_addr, digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+			/* Load the current hash length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hashmode);
+ set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ key_dma_addr, keylen, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get hashed key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hashmode);
+ set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+ digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ set_cipher_config0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, (blocksize - digestsize));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
+ digestsize), (blocksize - digestsize),
+ NS_BIT, 0);
+ idx++;
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
+ keylen, NS_BIT);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+ keylen, NS_BIT, 0);
+ idx++;
+
+ if ((blocksize - keylen) != 0) {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0,
+ (blocksize - keylen));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (padded_authkey_dma_addr +
+ keylen),
+ (blocksize - keylen), NS_BIT, 0);
+ idx++;
+ }
+ }
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, (blocksize - keylen));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+ blocksize, NS_BIT, 0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc)
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+
+ if (key_dma_addr)
+ dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct rtattr *rta = (struct rtattr *)key;
+ struct cc_crypto_req cc_req = {};
+ struct crypto_authenc_key_param *param;
+ struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
+ int rc = -EINVAL;
+ unsigned int seq_len = 0;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
+ ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+ param = RTA_DATA(rta);
+ ctx->enc_keylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (keylen < ctx->enc_keylen)
+ goto badkey;
+ ctx->auth_keylen = keylen - ctx->enc_keylen;
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+			/* the nonce is stored in the last bytes of the key */
+ if (ctx->enc_keylen <
+ (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
+ goto badkey;
+ /* Copy nonce from last 4 bytes in CTR key to
+ * first 4 bytes in CTR IV
+ */
+ memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
+ ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_NONCE_SIZE);
+ /* Set CTR key size */
+ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+ } else { /* non-authenc - has just one key */
+ ctx->enc_keylen = keylen;
+ ctx->auth_keylen = 0;
+ }
+
+ rc = validate_keys_sizes(ctx);
+ if (rc)
+ goto badkey;
+
+ /* STAT_PHASE_1: Copy key to ctx */
+
+ /* Get key material */
+ memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+ if (ctx->enc_keylen == 24)
+ memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
+ rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ if (rc)
+ goto badkey;
+ }
+
+ /* STAT_PHASE_2: Create sequence */
+
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ seq_len = hmac_setkey(desc, ctx);
+ break;
+ case DRV_HASH_XCBC_MAC:
+ seq_len = xcbc_setkey(desc, ctx);
+ break;
+ case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
+ break; /* No auth. key setup */
+ default:
+ dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
+ rc = -ENOTSUPP;
+ goto badkey;
+ }
+
+ /* STAT_PHASE_3: Submit sequence to HW */
+
+ if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
+ if (rc) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ goto setkey_error;
+ }
+ }
+
+ /* Update STAT_PHASE_3 */
+ return rc;
+
+badkey:
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+setkey_error:
+ return rc;
+}
+
+static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (keylen < 3)
+ return -EINVAL;
+
+ keylen -= 3;
+ memcpy(ctx->ctr_nonce, key + keylen, 3);
+
+ return cc_aead_setkey(tfm, key, keylen);
+}
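A caller-side sketch of the key blob this expects (buffer names and the 128-bit key size are illustrative): the AES key followed by the 3-byte RFC 4309 salt, which the function above peels off into ctx->ctr_nonce:

	u8 blob[AES_KEYSIZE_128 + 3];

	memcpy(blob, aes_key, AES_KEYSIZE_128);
	memcpy(blob + AES_KEYSIZE_128, salt, 3);	/* 3-byte CCM salt */
	crypto_aead_setkey(tfm, blob, sizeof(blob));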
+
+static int cc_aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ /* Unsupported auth. sizes */
+ if (authsize == 0 ||
+ authsize > crypto_aead_maxauthsize(authenc)) {
+ return -ENOTSUPP;
+ }
+
+ ctx->authsize = authsize;
+ dev_dbg(dev, "authlen=%d\n", ctx->authsize);
+
+ return 0;
+}
+
+static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
+ unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ switch (assoc_dma_type) {
+ case CC_DMA_BUF_DLLI:
+ dev_dbg(dev, "ASSOC buffer type DLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
+ areq->assoclen, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
+ set_din_not_last_indication(&desc[idx]);
+ break;
+ case CC_DMA_BUF_MLLI:
+ dev_dbg(dev, "ASSOC buffer type MLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
+ areq_ctx->assoc.mlli_nents, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
+ areq_ctx->cryptlen > 0)
+ set_din_not_last_indication(&desc[idx]);
+ break;
+ case CC_DMA_BUF_NULL:
+ default:
+ dev_err(dev, "Invalid ASSOC buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_proc_authen_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size, int direct)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ unsigned int idx = *seq_size;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ switch (data_dma_type) {
+ case CC_DMA_BUF_DLLI:
+ {
+ struct scatterlist *cipher =
+ (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ areq_ctx->dst_sgl : areq_ctx->src_sgl;
+
+ unsigned int offset =
+ (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ areq_ctx->dst_offset : areq_ctx->src_offset;
+ dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (sg_dma_address(cipher) + offset),
+ areq_ctx->cryptlen, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ }
+ case CC_DMA_BUF_MLLI:
+ {
+		/* DOUBLE-PASS flow (the default):
+		 * assoc. data + IV + data are compacted into one MLLI table;
+		 * if assoclen is zero, only the IV is processed
+ */
+ cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+ u32 mlli_nents = areq_ctx->assoc.mlli_nents;
+
+ if (areq_ctx->is_single_pass) {
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ mlli_addr = areq_ctx->dst.sram_addr;
+ mlli_nents = areq_ctx->dst.mlli_nents;
+ } else {
+ mlli_addr = areq_ctx->src.sram_addr;
+ mlli_nents = areq_ctx->src.mlli_nents;
+ }
+ }
+
+ dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
+ NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ }
+ case CC_DMA_BUF_NULL:
+ default:
+ dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_proc_cipher_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (areq_ctx->cryptlen == 0)
+ return; /*null processing*/
+
+ switch (data_dma_type) {
+ case CC_DMA_BUF_DLLI:
+ dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (sg_dma_address(areq_ctx->src_sgl) +
+ areq_ctx->src_offset), areq_ctx->cryptlen,
+ NS_BIT);
+ set_dout_dlli(&desc[idx],
+ (sg_dma_address(areq_ctx->dst_sgl) +
+ areq_ctx->dst_offset),
+ areq_ctx->cryptlen, NS_BIT, 0);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ case CC_DMA_BUF_MLLI:
+ dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
+ areq_ctx->src.mlli_nents, NS_BIT);
+ set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
+ areq_ctx->dst.mlli_nents, NS_BIT, 0);
+ set_flow_mode(&desc[idx], flow_mode);
+ break;
+ case CC_DMA_BUF_NULL:
+ default:
+ dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_proc_digest_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ int direct = req_ctx->gen_ctx.op_type;
+
+ /* Get final ICV result */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ hw_desc_init(&desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ } else {
+ set_cipher_config0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_cipher_mode(&desc[idx], hash_mode);
+ }
+ } else { /*Decrypt*/
+ /* Get ICV out from hardware */
+ hw_desc_init(&desc[idx]);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
+ ctx->authsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_cipher_config0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_aes_not_hash_mode(&desc[idx]);
+ } else {
+ set_cipher_mode(&desc[idx], hash_mode);
+ }
+ }
+
+ *seq_size = (++idx);
+}
+
+static void cc_set_cipher_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = req_ctx->hw_iv_size;
+ unsigned int idx = *seq_size;
+ int direct = req_ctx->gen_ctx.op_type;
+
+ /* Setup cipher state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_config0(&desc[idx], direct);
+ set_flow_mode(&desc[idx], ctx->flow_mode);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
+ hw_iv_size, NS_BIT);
+ if (ctx->cipher_mode == DRV_CIPHER_CTR)
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ else
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], ctx->cipher_mode);
+ idx++;
+
+ /* Setup enc. key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_config0(&desc[idx], direct);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_flow_mode(&desc[idx], ctx->flow_mode);
+ if (ctx->flow_mode == S_DIN_to_AES) {
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+ ctx->enc_keylen), NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ } else {
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ set_key_size_des(&desc[idx], ctx->enc_keylen);
+ }
+ set_cipher_mode(&desc[idx], ctx->cipher_mode);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size, unsigned int data_flow_mode)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int idx = *seq_size;
+
+ if (req_ctx->cryptlen == 0)
+ return; /*null processing*/
+
+ cc_set_cipher_desc(req, desc, &idx);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* We must wait for DMA to write all cipher */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+ }
+
+ *seq_size = idx;
+}
+
+static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ unsigned int idx = *seq_size;
+
+ /* Loading hash ipad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
+ NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load init. digest len (64 bytes) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+ ctx->drvdata->hash_len_sz);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int idx = *seq_size;
+
+ /* Loading MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K1 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+ AES_KEYSIZE_128, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K2 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+ AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K3 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+ 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_proc_header_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ /* Hash associated data */
+ if (req->assoclen > 0)
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
+
+ /* Hash IV */
+ *seq_size = idx;
+}
+
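+/*
+ * Finalize the outer HMAC pass: pad and export the inner hash result to the
+ * SRAM workspace, reload the opad state and the initial digest length, and
+ * feed the inner digest back into the hash engine.
+ */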
+static void cc_proc_scheme_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ unsigned int idx = *seq_size;
+
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
+ ctx->drvdata->hash_len_sz);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_cipher_do(&desc[idx], DO_PAD);
+ idx++;
+
+ /* Get final ICV result */
+ hw_desc_init(&desc[idx]);
+ set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
+ digest_size);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_cipher_mode(&desc[idx], hash_mode);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
+ digest_size, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load init. digest len (64 bytes) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], hash_mode);
+ set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
+ ctx->drvdata->hash_len_sz);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
+ digest_size);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ *seq_size = idx;
+}
+
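+/*
+ * Copy the MLLI (multi-LLI) scatter table from host memory into CryptoCell
+ * SRAM when either the assoc. or data buffer uses MLLI DMA, or when the
+ * double-pass flow is used.
+ */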
+static void cc_mlli_to_sram(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
+ !req_ctx->is_single_pass) {
+ dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
+ (unsigned int)ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ /* Copy MLLI table host-to-sram */
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[*seq_size],
+ ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[*seq_size], BYPASS);
+ (*seq_size)++;
+ }
+}
+
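+/*
+ * Select the data flow mode: single-pass flows route the data through the
+ * cipher and hash engines in one descriptor, while double-pass flows use a
+ * plain cipher-in/cipher-out flow and hash the data in a separate pass.
+ */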
+static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
+ enum cc_flow_mode setup_flow_mode,
+ bool is_single_pass)
+{
+ enum cc_flow_mode data_flow_mode;
+
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ if (setup_flow_mode == S_DIN_to_AES)
+ data_flow_mode = is_single_pass ?
+ AES_to_HASH_and_DOUT : DIN_AES_DOUT;
+ else
+ data_flow_mode = is_single_pass ?
+ DES_to_HASH_and_DOUT : DIN_DES_DOUT;
+ } else { /* Decrypt */
+ if (setup_flow_mode == S_DIN_to_AES)
+ data_flow_mode = is_single_pass ?
+ AES_and_HASH : DIN_AES_DOUT;
+ else
+ data_flow_mode = is_single_pass ?
+ DES_and_HASH : DIN_DES_DOUT;
+ }
+
+ return data_flow_mode;
+}
+
+static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int data_flow_mode =
+ cc_get_data_flow(direct, ctx->flow_mode,
+ req_ctx->is_single_pass);
+
+ if (req_ctx->is_single_pass) {
+ /**
+ * Single-pass flow
+ */
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
+ return;
+ }
+
+ /**
+ * Double-pass flow
+ * Fallback for unsupported single-pass modes,
+	 * i.e. when the assoc. data length is not a multiple of a word
+ */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* encrypt first.. */
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+ /* authenc after..*/
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
+
+ } else { /*DECRYPT*/
+ /* authenc first..*/
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ /* decrypt after.. */
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+		/* reading the digest result (with the completion bit set)
+		 * must come after the cipher operation
+ */
+ cc_proc_digest_desc(req, desc, seq_size);
+ }
+}
+
+static void
+cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int data_flow_mode =
+ cc_get_data_flow(direct, ctx->flow_mode,
+ req_ctx->is_single_pass);
+
+ if (req_ctx->is_single_pass) {
+ /**
+ * Single-pass flow
+ */
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
+ return;
+ }
+
+ /**
+ * Double-pass flow
+ * Fallback for unsupported single-pass modes,
+	 * i.e. when the assoc. data length is not a multiple of a word
+ */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* encrypt first.. */
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+ /* authenc after.. */
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_digest_desc(req, desc, seq_size);
+ } else { /*DECRYPT*/
+ /* authenc first.. */
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ /* decrypt after..*/
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
+		/* reading the digest result (with the completion bit set)
+		 * must come after the cipher operation
+ */
+ cc_proc_digest_desc(req, desc, seq_size);
+ }
+}
+
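+/*
+ * Check that the request lengths are legal for the configured mode and
+ * decide whether the fast single-pass flow can be used or the double-pass
+ * fallback is needed (e.g. for non-word-aligned assoc. data).
+ */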
+static int validate_data_size(struct cc_aead_ctx *ctx,
+ enum drv_crypto_direction direct,
+ struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ unsigned int assoclen = req->assoclen;
+ unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ (req->cryptlen - ctx->authsize) : req->cryptlen;
+
+ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->cryptlen < ctx->authsize)
+ goto data_size_err;
+
+ areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
+
+ switch (ctx->flow_mode) {
+ case S_DIN_to_AES:
+ if (ctx->cipher_mode == DRV_CIPHER_CBC &&
+ !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
+ goto data_size_err;
+ if (ctx->cipher_mode == DRV_CIPHER_CCM)
+ break;
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ if (areq_ctx->plaintext_authenticate_only)
+ areq_ctx->is_single_pass = false;
+ break;
+ }
+
+ if (!IS_ALIGNED(assoclen, sizeof(u32)))
+ areq_ctx->is_single_pass = false;
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR &&
+ !IS_ALIGNED(cipherlen, sizeof(u32)))
+ areq_ctx->is_single_pass = false;
+
+ break;
+ case S_DIN_to_DES:
+ if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
+ goto data_size_err;
+ if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
+ areq_ctx->is_single_pass = false;
+ break;
+ default:
+ dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
+ goto data_size_err;
+ }
+
+ return 0;
+
+data_size_err:
+ return -EINVAL;
+}
+
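+/*
+ * Encode the CCM associated-data length (written to the a0 buffer) per
+ * NIST SP 800-38C: lengths below 2^16 - 2^8 use a 2-byte encoding, larger
+ * lengths use the 6-byte 0xFF 0xFE prefixed form. Returns the number of
+ * bytes written.
+ */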
+static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
+{
+ unsigned int len = 0;
+
+ if (header_size == 0)
+ return 0;
+
+ if (header_size < ((1UL << 16) - (1UL << 8))) {
+ len = 2;
+
+ pa0_buff[0] = (header_size >> 8) & 0xFF;
+ pa0_buff[1] = header_size & 0xFF;
+ } else {
+ len = 6;
+
+ pa0_buff[0] = 0xFF;
+ pa0_buff[1] = 0xFE;
+ pa0_buff[2] = (header_size >> 24) & 0xFF;
+ pa0_buff[3] = (header_size >> 16) & 0xFF;
+ pa0_buff[4] = (header_size >> 8) & 0xFF;
+ pa0_buff[5] = header_size & 0xFF;
+ }
+
+ return len;
+}
+
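+/*
+ * Write the message length big-endian into the last csize bytes of the
+ * length field, returning -EOVERFLOW if it does not fit.
+ */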
+static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
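+/*
+ * Build the CCM descriptor sequence: AES-CTR for the payload plus AES-CBC-MAC
+ * over B0, the encoded header and the payload, then encrypt the resulting MAC
+ * with the counter block of count 0 (ccm_iv0) to produce the tag.
+ */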
+static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+ unsigned int cipher_flow_mode;
+ dma_addr_t mac_result;
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ mac_result = req_ctx->mac_buf_dma_addr;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_and_HASH;
+ mac_result = req_ctx->icv_dma_addr;
+ }
+
+ /* load key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+ ctx->enc_keylen), NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load ctr state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI,
+ req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load MAC key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+ ctx->enc_keylen), NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* load MAC state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* process assoc data */
+ if (req->assoclen > 0) {
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ sg_dma_address(&req_ctx->ccm_adata_sg),
+ AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+ }
+
+ /* process the cipher */
+ if (req_ctx->cryptlen)
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
+
+	/* Read the intermediate MAC */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+ set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
+ NS_BIT, 0);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_aes_not_hash_mode(&desc[idx]);
+ idx++;
+
+ /* load AES-CTR state (for last MAC calculation)*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* encrypt the "T" value and store MAC in mac_state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+ ctx->authsize, NS_BIT);
+ set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ *seq_size = idx;
+ return 0;
+}
+
+static int config_ccm_adata(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int lp = req->iv[0];
+	/* Note: the code assumes that req->iv[0] already contains the value
+	 * of L' of RFC 3610
+	 */
+	unsigned int l = lp + 1;  /* This is L of RFC 3610. */
+	unsigned int m = ctx->authsize;  /* This is M of RFC 3610. */
+ u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
+ u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
+ u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+ unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - ctx->authsize);
+ int rc;
+
+ memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
+ memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
+
+ /* taken from crypto/ccm.c */
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (l < 2 || l > 8) {
+ dev_err(dev, "illegal iv value %X\n", req->iv[0]);
+ return -EINVAL;
+ }
+ memcpy(b0, req->iv, AES_BLOCK_SIZE);
+
+ /* format control info per RFC 3610 and
+ * NIST Special Publication 800-38C
+ */
+ *b0 |= (8 * ((m - 2) / 2));
+ if (req->assoclen > 0)
+ *b0 |= 64; /* Enable bit 6 if Adata exists. */
+
+	rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write the l-octet length field. */
+ if (rc) {
+		dev_err(dev, "message len overflow detected\n");
+ return rc;
+ }
+ /* END of "taken from crypto/ccm.c" */
+
+ /* l(a) - size of associated data. */
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+
+ memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
+ req->iv[15] = 1;
+
+ memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
+ ctr_count_0[15] = 0;
+
+ return 0;
+}
+
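+/*
+ * Build the RFC 4309 CCM counter IV from the 3-byte nonce saved at setkey
+ * time and the 8-byte per-request IV, and adjust assoclen accordingly.
+ */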
+static void cc_proc_rfc4309_ccm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+ /* L' */
+ memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
+ /* For RFC 4309, always use 4 bytes for message length
+ * (at most 2^32-1 bytes).
+ */
+ areq_ctx->ctr_iv[0] = 3;
+
+	/* In RFC 4309 there is an 11-byte nonce+IV part,
+	 * which we build here.
+ */
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
+ CCM_BLOCK_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
+ CCM_BLOCK_IV_SIZE);
+ req->iv = areq_ctx->ctr_iv;
+ req->assoclen -= CCM_BLOCK_IV_SIZE;
+}
+
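+/*
+ * Derive the GHASH subkey by encrypting a zero block with the AES key and
+ * configure the hash engine for GHASH with a zero initial state.
+ */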
+static void cc_set_ghash_desc(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+
+	/* load the key into the AES engine */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* process one zero block to generate hkey */
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+ set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT, 0);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Memory Barrier */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Load GHASH subkey */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Configure Hash Engine to work with GHASH.
+ * Since it was not possible to extend HASH submodes to add GHASH,
+	 * the following command is necessary in order to
+	 * select GHASH (according to the HW designers)
+ */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_cipher_do(&desc[idx], 1); /* 1 = AES_SK RKEK */
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+	/* Load the initial GHASH state (which is zero); as with any hash,
+	 * an initial state must be loaded
+ */
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_aes_not_hash_mode(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+
+	/* load the key into the AES engine */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
+		/* load the initial AES-CTR counter value incremented by 2 */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI,
+ req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ }
+
+ *seq_size = idx;
+}
+
+static void cc_proc_gcm_result(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ dma_addr_t mac_result;
+ unsigned int idx = *seq_size;
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ mac_result = req_ctx->mac_buf_dma_addr;
+ } else { /* Encrypt */
+ mac_result = req_ctx->icv_dma_addr;
+ }
+
+ /* process(ghash) gcm_block_len */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+	/* Store GHASH state after GHASH(assoc. data + ciphertext + len block) */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT, 0);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_aes_not_hash_mode(&desc[idx]);
+
+ idx++;
+
+	/* load the initial AES-CTR counter value incremented by 1 */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_key_size_aes(&desc[idx], ctx->enc_keylen);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Memory Barrier */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+	/* run GCTR on the stored GHASH result and store the MAC in mac_state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+ set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int cipher_flow_mode;
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_and_HASH;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ }
+
+	/* In RFC 4543 there is no data to encrypt; just copy data from src to dst */
+ if (req_ctx->plaintext_authenticate_only) {
+ cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
+ cc_set_ghash_desc(req, desc, seq_size);
+ /* process(ghash) assoc data */
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
+ return 0;
+ }
+
+	/* for GCM and RFC 4106 */
+ cc_set_ghash_desc(req, desc, seq_size);
+ /* process(ghash) assoc data */
+ if (req->assoclen > 0)
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
+ /* process(gctr+ghash) */
+ if (req_ctx->cryptlen)
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
+
+ return 0;
+}
+
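+/*
+ * Prepare the per-request GCM state: the counter blocks derived from the IV
+ * (incremented by 1 for the final tag pass and by 2 for payload encryption)
+ * and the length block holding len(A) and len(C) in bits.
+ */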
+static int config_gcm_context(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - ctx->authsize);
+ __be32 counter = cpu_to_be32(2);
+
+ dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req->assoclen, ctx->authsize);
+
+ memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
+
+ memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
+
+ memcpy(req->iv + 12, &counter, 4);
+ memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
+
+ counter = cpu_to_be32(1);
+ memcpy(req->iv + 12, &counter, 4);
+ memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
+
+ if (!req_ctx->plaintext_authenticate_only) {
+ __be64 temp64;
+
+ temp64 = cpu_to_be64(req->assoclen * 8);
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
+ temp64 = cpu_to_be64(cryptlen * 8);
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
+ } else {
+		/* RFC 4543: all data (AAD, IV, plaintext) is treated as
+		 * additional authenticated data, i.e. nothing is encrypted.
+ */
+ __be64 temp64;
+
+ temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
+ cryptlen) * 8);
+ memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
+ temp64 = 0;
+ memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
+ }
+
+ return 0;
+}
+
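+/*
+ * Build the 12-byte GCM IV from the 4-byte nonce saved at setkey time and
+ * the 8-byte per-request IV (RFC 4106/4543), and adjust assoclen.
+ */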
+static void cc_proc_rfc4_gcm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
+ ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
+ GCM_BLOCK_RFC4_IV_SIZE);
+ req->iv = areq_ctx->ctr_iv;
+ req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+}
+
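+/*
+ * Common entry point for all AEAD requests: validate sizes, set up the
+ * request context and IV, map the DMA buffers, build the HW descriptor
+ * sequence for the selected mode and submit it to the request queue.
+ */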
+static int cc_proc_aead(struct aead_request *req,
+ enum drv_crypto_direction direct)
+{
+ int rc = 0;
+ int seq_len = 0;
+ struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+
+ dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
+ ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
+ ctx, req, req->iv, sg_virt(req->src), req->src->offset,
+ sg_virt(req->dst), req->dst->offset, req->cryptlen);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ /* Check data length according to mode */
+ if (validate_data_size(ctx, direct, req)) {
+ dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
+ req->cryptlen, req->assoclen);
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ return -EINVAL;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_aead_complete;
+ cc_req.user_arg = (void *)req;
+
+ /* Setup request context */
+ areq_ctx->gen_ctx.op_type = direct;
+ areq_ctx->req_authsize = ctx->authsize;
+ areq_ctx->cipher_mode = ctx->cipher_mode;
+
+ /* STAT_PHASE_1: Map buffers */
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ /* Build CTR IV - Copy nonce from last 4 bytes in
+ * CTR key to first 4 bytes in CTR IV
+ */
+ memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
+ CTR_RFC3686_NONCE_SIZE);
+		if (!areq_ctx->backup_giv) /* user-provided (non-generated) IV */
+ memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
+ req->iv, CTR_RFC3686_IV_SIZE);
+ /* Initialize counter portion of counter block */
+ *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+
+ /* Replace with counter iv */
+ req->iv = areq_ctx->ctr_iv;
+ areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
+ } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
+ (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
+ areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
+ if (areq_ctx->ctr_iv != req->iv) {
+ memcpy(areq_ctx->ctr_iv, req->iv,
+ crypto_aead_ivsize(tfm));
+ req->iv = areq_ctx->ctr_iv;
+ }
+ } else {
+ areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
+ }
+
+ if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+ rc = config_ccm_adata(req);
+ if (rc) {
+			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!\n",
+ rc);
+ goto exit;
+ }
+ } else {
+ areq_ctx->ccm_hdr_size = ccm_header_size_null;
+ }
+
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ rc = config_gcm_context(req);
+ if (rc) {
+			dev_dbg(dev, "config_gcm_context() returned with a failure %d!\n",
+ rc);
+ goto exit;
+ }
+ }
+
+ rc = cc_map_aead_request(ctx->drvdata, req);
+ if (rc) {
+ dev_err(dev, "map_request() failed\n");
+ goto exit;
+ }
+
+ /* do we need to generate IV? */
+ if (areq_ctx->backup_giv) {
+ /* set the DMA mapped IV address*/
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr +
+ CTR_RFC3686_NONCE_SIZE;
+ cc_req.ivgen_dma_addr_len = 1;
+ } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+ /* In ccm, the IV needs to exist both inside B0 and
+			 * inside the counter. It is also copied to iv_dma_addr
+ * for other reasons (like returning it to the user).
+ * So, using 3 (identical) IV outputs.
+ */
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr +
+ CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr[1] =
+ sg_dma_address(&areq_ctx->ccm_adata_sg) +
+ CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr[2] =
+ sg_dma_address(&areq_ctx->ccm_adata_sg) +
+ CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
+ cc_req.ivgen_dma_addr_len = 3;
+ } else {
+ cc_req.ivgen_dma_addr[0] =
+ areq_ctx->gen_ctx.iv_dma_addr;
+ cc_req.ivgen_dma_addr_len = 1;
+ }
+
+ /* set the IV size (8/16 B long)*/
+ cc_req.ivgen_size = crypto_aead_ivsize(tfm);
+ }
+
+ /* STAT_PHASE_2: Create sequence */
+
+ /* Load MLLI tables to SRAM if necessary */
+ cc_mlli_to_sram(req, desc, &seq_len);
+
+ /*TODO: move seq len by reference */
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ cc_hmac_authenc(req, desc, &seq_len);
+ break;
+ case DRV_HASH_XCBC_MAC:
+ cc_xcbc_authenc(req, desc, &seq_len);
+ break;
+ case DRV_HASH_NULL:
+ if (ctx->cipher_mode == DRV_CIPHER_CCM)
+ cc_ccm(req, desc, &seq_len);
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR)
+ cc_gcm(req, desc, &seq_len);
+ break;
+ default:
+ dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
+ cc_unmap_aead_request(dev, req);
+ rc = -ENOTSUPP;
+ goto exit;
+ }
+
+ /* STAT_PHASE_3: Lock HW and push sequence */
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
+
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_aead_request(dev, req);
+ }
+
+exit:
+ return rc;
+}
+
+static int cc_aead_encrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = false;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_encrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = true;
+
+ cc_proc_rfc4309_ccm(req);
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int cc_aead_decrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = false;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->is_gcm4543 = true;
+ cc_proc_rfc4309_ccm(req);
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+out:
+ return rc;
+}
+
+static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ keylen -= 4;
+ memcpy(ctx->ctr_nonce, key + keylen, 4);
+
+ return cc_aead_setkey(tfm, key, keylen);
+}
+
+static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ keylen -= 4;
+ memcpy(ctx->ctr_nonce, key + keylen, 4);
+
+ return cc_aead_setkey(tfm, key, keylen);
+}
+
+static int cc_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "authsize %d\n", authsize);
+
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "authsize %d\n", authsize);
+
+ if (authsize != 16)
+ return -EINVAL;
+
+ return cc_aead_setauthsize(authenc, authsize);
+}
+
+static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_encrypt() above. */
+
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ cc_proc_rfc4_gcm(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_encrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+	/* plaintext is not encrypted with rfc4543 */
+ areq_ctx->plaintext_authenticate_only = true;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ cc_proc_rfc4_gcm(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_decrypt() above. */
+
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ cc_proc_rfc4_gcm(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
+{
+ /* Very similar to cc_aead_decrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+	/* plaintext is not decrypted with rfc4543 */
+ areq_ctx->plaintext_authenticate_only = true;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ cc_proc_rfc4_gcm(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS && rc != -EBUSY)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+/* aead alg */
+static struct cc_alg_template aead_algs[] = {
+ {
+ .name = "authenc(hmac(sha1),cbc(aes))",
+ .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA1,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .auth_mode = DRV_HASH_SHA1,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(aes))",
+ .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA256,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .auth_mode = DRV_HASH_SHA256,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(xcbc(aes),cbc(aes))",
+ .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_XCBC_MAC,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA1,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA256,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_XCBC_MAC,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "ccm(aes)",
+ .driver_name = "ccm-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_ccm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CCM,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "rfc4309(ccm(aes))",
+ .driver_name = "rfc4309-ccm-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_rfc4309_ccm_setkey,
+ .setauthsize = cc_rfc4309_ccm_setauthsize,
+ .encrypt = cc_rfc4309_ccm_encrypt,
+ .decrypt = cc_rfc4309_ccm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = CCM_BLOCK_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CCM,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "gcm(aes)",
+ .driver_name = "gcm-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_gcm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "rfc4106(gcm(aes))",
+ .driver_name = "rfc4106-gcm-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_rfc4106_gcm_setkey,
+ .setauthsize = cc_rfc4106_gcm_setauthsize,
+ .encrypt = cc_rfc4106_gcm_encrypt,
+ .decrypt = cc_rfc4106_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "rfc4543(gcm(aes))",
+ .driver_name = "rfc4543-gcm-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = cc_rfc4543_gcm_setkey,
+ .setauthsize = cc_rfc4543_gcm_setauthsize,
+ .encrypt = cc_rfc4543_gcm_encrypt,
+ .decrypt = cc_rfc4543_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
+ .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+};
+
+static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
+ struct device *dev)
+{
+ struct cc_crypto_alg *t_alg;
+ struct aead_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ alg = &tmpl->template_aead;
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->driver_name);
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CC_CRA_PRIO;
+
+ alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ tmpl->type;
+ alg->init = cc_aead_init;
+ alg->exit = cc_aead_exit;
+
+ t_alg->aead_alg = *alg;
+
+ t_alg->cipher_mode = tmpl->cipher_mode;
+ t_alg->flow_mode = tmpl->flow_mode;
+ t_alg->auth_mode = tmpl->auth_mode;
+
+ return t_alg;
+}
+
+int cc_aead_free(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_alg *t_alg, *n;
+ struct cc_aead_handle *aead_handle =
+ (struct cc_aead_handle *)drvdata->aead_handle;
+
+ if (aead_handle) {
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
+ entry) {
+ crypto_unregister_aead(&t_alg->aead_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+ kfree(aead_handle);
+ drvdata->aead_handle = NULL;
+ }
+
+ return 0;
+}
+
+int cc_aead_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_aead_handle *aead_handle;
+ struct cc_crypto_alg *t_alg;
+ int rc = -ENOMEM;
+ int alg;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
+ if (!aead_handle) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
+ INIT_LIST_HEAD(&aead_handle->aead_list);
+ drvdata->aead_handle = aead_handle;
+
+ aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
+ MAX_HMAC_DIGEST_SIZE);
+
+ if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
+ dev_err(dev, "SRAM pool exhausted\n");
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ /* Linux crypto */
+ for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
+ if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
+ continue;
+
+ t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ aead_algs[alg].driver_name);
+ goto fail1;
+ }
+ t_alg->drvdata = drvdata;
+ rc = crypto_register_aead(&t_alg->aead_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ t_alg->aead_alg.base.cra_driver_name);
+ goto fail2;
+ } else {
+ list_add_tail(&t_alg->entry, &aead_handle->aead_list);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->aead_alg.base.cra_driver_name);
+ }
+ }
+
+ return 0;
+
+fail2:
+ kfree(t_alg);
+fail1:
+ cc_aead_free(drvdata);
+fail0:
+ return rc;
+}
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
new file mode 100644
index 000000000000..5edf3b351fa4
--- /dev/null
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_aead.h
+ * ARM CryptoCell AEAD Crypto API
+ */
+
+#ifndef __CC_AEAD_H__
+#define __CC_AEAD_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+
+/* mac_cmp - HW writes 8 B but all bytes hold the same value */
+#define ICV_CMP_SIZE 8
+#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
+#define MAX_MAC_SIZE SHA256_DIGEST_SIZE
+
+/* defines for AES GCM configuration buffer */
+#define GCM_BLOCK_LEN_SIZE 8
+
+#define GCM_BLOCK_RFC4_IV_OFFSET 4
+#define GCM_BLOCK_RFC4_IV_SIZE 8 /* IV size for rfc's */
+#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
+#define GCM_BLOCK_RFC4_NONCE_SIZE 4
+
+/* Offsets into AES CCM configuration buffer */
+#define CCM_B0_OFFSET 0
+#define CCM_A0_OFFSET 16
+#define CCM_CTR_COUNT_0_OFFSET 32
+/* CCM B0 and CTR_COUNT constants. */
+#define CCM_BLOCK_NONCE_OFFSET 1 /* Nonce offset inside B0 and CTR_COUNT */
+#define CCM_BLOCK_NONCE_SIZE 3 /* Nonce size inside B0 and CTR_COUNT */
+#define CCM_BLOCK_IV_OFFSET 4 /* IV offset inside B0 and CTR_COUNT */
+#define CCM_BLOCK_IV_SIZE 8 /* IV size inside B0 and CTR_COUNT */
+
+enum aead_ccm_header_size {
+ ccm_header_size_null = -1,
+ ccm_header_size_zero = 0,
+ ccm_header_size_2 = 2,
+ ccm_header_size_6 = 6,
+ ccm_header_size_max = S32_MAX
+};
+
+struct aead_req_ctx {
+	/* Allocate a full cache line although only 4 bytes are needed, to
+	 * ensure the next field starts on a cache line boundary.
+	 * Used for both the digest HW compare and the CCM/GCM MAC value.
+	 */
+ u8 mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
+ u8 ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
+
+ //used in gcm
+ u8 gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
+ u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
+ u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
+ struct {
+ u8 len_a[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
+ u8 len_c[GCM_BLOCK_LEN_SIZE];
+ } gcm_len_block;
+
+ u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
+ /* HW actual size input */
+ unsigned int hw_iv_size ____cacheline_aligned;
+ /* used to prevent cache coherence problem */
+ u8 backup_mac[MAX_MAC_SIZE];
+ u8 *backup_iv; /*store iv for generated IV flow*/
+ u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
+ dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
+ /* buffer for internal ccm configurations */
+ dma_addr_t ccm_iv0_dma_addr;
+ dma_addr_t icv_dma_addr; /* Phys. address of ICV */
+
+ //used in gcm
+ /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc1_dma_addr;
+ /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc2_dma_addr;
+ dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
+ dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
+ bool is_gcm4543;
+
+ u8 *icv_virt_addr; /* Virt. address of ICV */
+ struct async_gen_req_ctx gen_ctx;
+ struct cc_mlli assoc;
+ struct cc_mlli src;
+ struct cc_mlli dst;
+ struct scatterlist *src_sgl;
+ struct scatterlist *dst_sgl;
+ unsigned int src_offset;
+ unsigned int dst_offset;
+ enum cc_req_dma_buf_type assoc_buff_type;
+ enum cc_req_dma_buf_type data_buff_type;
+ struct mlli_params mlli_params;
+ unsigned int cryptlen;
+ struct scatterlist ccm_adata_sg;
+ enum aead_ccm_header_size ccm_hdr_size;
+ unsigned int req_authsize;
+ enum drv_cipher_mode cipher_mode;
+ bool is_icv_fragmented;
+ bool is_single_pass;
+ bool plaintext_authenticate_only; //for gcm_rfc4543
+};
+
+int cc_aead_alloc(struct cc_drvdata *drvdata);
+int cc_aead_free(struct cc_drvdata *drvdata);
+
+#endif /*__CC_AEAD_H__*/
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
new file mode 100644
index 000000000000..b32577477b4c
--- /dev/null
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -0,0 +1,1651 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <crypto/internal/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+
+#include "cc_buffer_mgr.h"
+#include "cc_lli_defs.h"
+#include "cc_cipher.h"
+#include "cc_hash.h"
+#include "cc_aead.h"
+
+enum dma_buffer_type {
+ DMA_NULL_TYPE = -1,
+ DMA_SGL_TYPE = 1,
+ DMA_BUFF_TYPE = 2,
+};
+
+struct buff_mgr_handle {
+ struct dma_pool *mlli_buffs_pool;
+};
+
+union buffer_array_entry {
+ struct scatterlist *sgl;
+ dma_addr_t buffer_dma;
+};
+
+struct buffer_array {
+ unsigned int num_of_buffers;
+ union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+};
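+
+/*
+ * Usage note (added for clarity; not part of the original driver code):
+ * a buffer_array is filled incrementally via cc_add_sg_entry() /
+ * cc_add_buffer_entry() and is then flattened into a single MLLI table
+ * by cc_generate_mlli().
+ */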
+
+static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
+{
+ switch (type) {
+ case CC_DMA_BUF_NULL:
+ return "BUF_NULL";
+ case CC_DMA_BUF_DLLI:
+ return "BUF_DLLI";
+ case CC_DMA_BUF_MLLI:
+ return "BUF_MLLI";
+ default:
+ return "BUF_INVALID";
+ }
+}
+
+/**
+ * cc_copy_mac() - Copy MAC to temporary location
+ *
+ * @dev: device object
+ * @req: aead request object
+ * @dir: [IN] copy from/to sgl
+ */
+static void cc_copy_mac(struct device *dev, struct aead_request *req,
+ enum cc_sg_cpy_direct dir)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ u32 skip = req->assoclen + req->cryptlen;
+
+ if (areq_ctx->is_gcm4543)
+ skip += crypto_aead_ivsize(tfm);
+
+ cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
+ (skip - areq_ctx->req_authsize), skip, dir);
+}
+
+/**
+ * cc_get_sgl_nents() - Get scatterlist number of entries.
+ *
+ * @dev: Device object
+ * @sg_list: SG list
+ * @nbytes: [IN] Total SGL data bytes.
+ * @lbytes: [OUT] Returns the amount of bytes at the last entry
+ * @is_chained: [OUT] Set to true if the SGL is chained (may be NULL)
+ */
+static unsigned int cc_get_sgl_nents(struct device *dev,
+ struct scatterlist *sg_list,
+ unsigned int nbytes, u32 *lbytes,
+ bool *is_chained)
+{
+ unsigned int nents = 0;
+
+ while (nbytes && sg_list) {
+ if (sg_list->length) {
+ nents++;
+ /* get the number of bytes in the last entry */
+ *lbytes = nbytes;
+ nbytes -= (sg_list->length > nbytes) ?
+ nbytes : sg_list->length;
+ sg_list = sg_next(sg_list);
+ } else {
+ sg_list = (struct scatterlist *)sg_page(sg_list);
+ if (is_chained)
+ *is_chained = true;
+ }
+ }
+ dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
+ return nents;
+}
+
+/**
+ * cc_zero_sgl() - Zero scatter list data.
+ *
+ * @sgl: SG list to zero
+ * @data_len: Number of bytes to zero
+ */
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
+{
+ struct scatterlist *current_sg = sgl;
+ int sg_index = 0;
+
+ while (sg_index <= data_len) {
+ if (!current_sg) {
+ /* reached the end of the sgl --> just return back */
+ return;
+ }
+ memset(sg_virt(current_sg), 0, current_sg->length);
+ sg_index += current_sg->length;
+ current_sg = sg_next(current_sg);
+ }
+}
+
+/**
+ * cc_copy_sg_portion() - Copy scatter list data,
+ * from to_skip to end, to dest and vice versa
+ *
+ * @dev: Device object
+ * @dest: Flat buffer to copy to/from
+ * @sg: SG list to copy from/to
+ * @to_skip: Offset (in bytes) into the SG data at which to start
+ * @end: Offset (in bytes) into the SG data at which to stop
+ * @direct: CC_SG_TO_BUF or CC_SG_FROM_BUF
+ */
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
+{
+ u32 nents, lbytes;
+
+ nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+ sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+ (direct == CC_SG_TO_BUF));
+}
+
+static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
+ u32 buff_size, u32 *curr_nents,
+ u32 **mlli_entry_pp)
+{
+ u32 *mlli_entry_p = *mlli_entry_pp;
+ u32 new_nents;
+
+ /* Verify there is no memory overflow*/
+ new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
+ if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
+ return -ENOMEM;
+
+ /*handle buffer longer than 64 kbytes */
+ while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
+ cc_lli_set_addr(mlli_entry_p, buff_dma);
+ cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
+ dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+ *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
+ buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
+ buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
+ mlli_entry_p = mlli_entry_p + 2;
+ (*curr_nents)++;
+ }
+ /*Last entry */
+ cc_lli_set_addr(mlli_entry_p, buff_dma);
+ cc_lli_set_size(mlli_entry_p, buff_size);
+ dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+ *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
+ mlli_entry_p = mlli_entry_p + 2;
+ *mlli_entry_pp = mlli_entry_p;
+ (*curr_nents)++;
+ return 0;
+}
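+
+/*
+ * Illustrative sketch (not part of the original driver code): a conservative
+ * upper bound on how many MLLI entries a single contiguous buffer can
+ * consume, mirroring the overflow check at the top of
+ * cc_render_buff_to_mlli(). The helper name is hypothetical.
+ */
+static inline u32 cc_mlli_entries_bound(u32 buff_size)
+{
+	/* one entry per full CC_MAX_MLLI_ENTRY_SIZE chunk plus one for the tail */
+	return buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1;
+}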
+
+static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
+ u32 sgl_data_len, u32 sgl_offset,
+ u32 *curr_nents, u32 **mlli_entry_pp)
+{
+ struct scatterlist *curr_sgl = sgl;
+ u32 *mlli_entry_p = *mlli_entry_pp;
+ s32 rc = 0;
+
+ for ( ; (curr_sgl && sgl_data_len);
+ curr_sgl = sg_next(curr_sgl)) {
+ u32 entry_data_len =
+ (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
+ sg_dma_len(curr_sgl) - sgl_offset :
+ sgl_data_len;
+ sgl_data_len -= entry_data_len;
+ rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
+ sgl_offset, entry_data_len,
+ curr_nents, &mlli_entry_p);
+ if (rc)
+ return rc;
+
+ sgl_offset = 0;
+ }
+ *mlli_entry_pp = mlli_entry_p;
+ return 0;
+}
+
+static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
+ struct mlli_params *mlli_params, gfp_t flags)
+{
+ u32 *mlli_p;
+ u32 total_nents = 0, prev_total_nents = 0;
+ int rc = 0, i;
+
+ dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
+
+ /* Allocate memory from the pointed pool */
+ mlli_params->mlli_virt_addr =
+ dma_pool_alloc(mlli_params->curr_pool, flags,
+ &mlli_params->mlli_dma_addr);
+ if (!mlli_params->mlli_virt_addr) {
+ dev_err(dev, "dma_pool_alloc() failed\n");
+ rc = -ENOMEM;
+ goto build_mlli_exit;
+ }
+ /* Point to start of MLLI */
+ mlli_p = (u32 *)mlli_params->mlli_virt_addr;
+ /* go over all SG's and link it to one MLLI table */
+ for (i = 0; i < sg_data->num_of_buffers; i++) {
+ union buffer_array_entry *entry = &sg_data->entry[i];
+ u32 tot_len = sg_data->total_data_len[i];
+ u32 offset = sg_data->offset[i];
+
+ if (sg_data->type[i] == DMA_SGL_TYPE)
+ rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
+ offset, &total_nents,
+ &mlli_p);
+ else /*DMA_BUFF_TYPE*/
+ rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
+ tot_len, &total_nents,
+ &mlli_p);
+ if (rc)
+ return rc;
+
+ /* set last bit in the current table */
+ if (sg_data->mlli_nents[i]) {
+ /*Calculate the current MLLI table length for the
+ *length field in the descriptor
+ */
+ *sg_data->mlli_nents[i] +=
+ (total_nents - prev_total_nents);
+ prev_total_nents = total_nents;
+ }
+ }
+
+ /* Set MLLI size for the bypass operation */
+ mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
+
+ dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+ mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
+ mlli_params->mlli_len);
+
+build_mlli_exit:
+ return rc;
+}
+
+static void cc_add_buffer_entry(struct device *dev,
+ struct buffer_array *sgl_data,
+ dma_addr_t buffer_dma, unsigned int buffer_len,
+ bool is_last_entry, u32 *mlli_nents)
+{
+ unsigned int index = sgl_data->num_of_buffers;
+
+ dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
+ index, &buffer_dma, buffer_len, is_last_entry);
+ sgl_data->nents[index] = 1;
+ sgl_data->entry[index].buffer_dma = buffer_dma;
+ sgl_data->offset[index] = 0;
+ sgl_data->total_data_len[index] = buffer_len;
+ sgl_data->type[index] = DMA_BUFF_TYPE;
+ sgl_data->is_last[index] = is_last_entry;
+ sgl_data->mlli_nents[index] = mlli_nents;
+ if (sgl_data->mlli_nents[index])
+ *sgl_data->mlli_nents[index] = 0;
+ sgl_data->num_of_buffers++;
+}
+
+static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
+ unsigned int nents, struct scatterlist *sgl,
+ unsigned int data_len, unsigned int data_offset,
+ bool is_last_table, u32 *mlli_nents)
+{
+ unsigned int index = sgl_data->num_of_buffers;
+
+ dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+ index, nents, sgl, data_len, is_last_table);
+ sgl_data->nents[index] = nents;
+ sgl_data->entry[index].sgl = sgl;
+ sgl_data->offset[index] = data_offset;
+ sgl_data->total_data_len[index] = data_len;
+ sgl_data->type[index] = DMA_SGL_TYPE;
+ sgl_data->is_last[index] = is_last_table;
+ sgl_data->mlli_nents[index] = mlli_nents;
+ if (sgl_data->mlli_nents[index])
+ *sgl_data->mlli_nents[index] = 0;
+ sgl_data->num_of_buffers++;
+}
+
+static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
+ enum dma_data_direction direction)
+{
+ u32 i, j;
+ struct scatterlist *l_sg = sg;
+
+ for (i = 0; i < nents; i++) {
+ if (!l_sg)
+ break;
+ if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
+ dev_err(dev, "dma_map_page() sg buffer failed\n");
+ goto err;
+ }
+ l_sg = sg_next(l_sg);
+ }
+ return nents;
+
+err:
+ /* Restore mapped parts */
+ for (j = 0; j < i; j++) {
+ if (!sg)
+ break;
+ dma_unmap_sg(dev, sg, 1, direction);
+ sg = sg_next(sg);
+ }
+ return 0;
+}
+
+static int cc_map_sg(struct device *dev, struct scatterlist *sg,
+ unsigned int nbytes, int direction, u32 *nents,
+ u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
+{
+ bool is_chained = false;
+
+ if (sg_is_last(sg)) {
+ /* One entry only case -set to DLLI */
+ if (dma_map_sg(dev, sg, 1, direction) != 1) {
+ dev_err(dev, "dma_map_sg() single buffer failed\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
+ sg->offset, sg->length);
+ *lbytes = nbytes;
+ *nents = 1;
+ *mapped_nents = 1;
+ } else { /*sg_is_last*/
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
+ &is_chained);
+		if (*nents > max_sg_nents) {
+			dev_err(dev, "Too many fragments. current %d max %d\n",
+				*nents, max_sg_nents);
+			*nents = 0;
+			return -ENOMEM;
+		}
+ if (!is_chained) {
+			/* When an IOMMU is present the number of mapped
+			 * nents might differ from the original sgl nents
+			 */
+ *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+ if (*mapped_nents == 0) {
+ *nents = 0;
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
+ }
+ } else {
+			/* In this case the driver maps entry by entry, so it
+			 * must have the same nents before and after the map
+			 */
+ *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
+ direction);
+ if (*mapped_nents != *nents) {
+ *nents = *mapped_nents;
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return 0;
+}
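+
+/*
+ * Note added for clarity (not in the original patch): cc_map_sg() above has
+ * three paths - a single-entry SGL is mapped directly (later used as DLLI),
+ * an unchained SGL is mapped with one dma_map_sg() call, and a chained SGL
+ * is mapped entry by entry via cc_dma_map_sg() so the nents count is
+ * preserved across the mapping.
+ */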
+
+static int
+cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
+ u8 *config_data, struct buffer_array *sg_data,
+ unsigned int assoclen)
+{
+ dev_dbg(dev, " handle additional data config set to DLLI\n");
+ /* create sg for the current buffer */
+ sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
+ AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
+ if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
+ dev_err(dev, "dma_map_sg() config buffer failed\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(&areq_ctx->ccm_adata_sg),
+ sg_page(&areq_ctx->ccm_adata_sg),
+ sg_virt(&areq_ctx->ccm_adata_sg),
+ areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
+ /* prepare for case of MLLI */
+ if (assoclen > 0) {
+ cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
+ (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+ 0, false, NULL);
+ }
+ return 0;
+}
+
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+ u8 *curr_buff, u32 curr_buff_cnt,
+ struct buffer_array *sg_data)
+{
+ dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
+ /* create sg for the current buffer */
+ sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
+ if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
+ dev_err(dev, "dma_map_sg() src buffer failed\n");
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
+ sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
+ areq_ctx->buff_sg->length);
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ areq_ctx->in_nents = 0;
+ /* prepare for case of MLLI */
+ cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+ false, NULL);
+ return 0;
+}
+
+void cc_unmap_cipher_request(struct device *dev, void *ctx,
+ unsigned int ivsize, struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+
+ if (req_ctx->gen_ctx.iv_dma_addr) {
+ dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+ &req_ctx->gen_ctx.iv_dma_addr, ivsize);
+ dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+ ivsize,
+ req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ }
+ /* Release pool */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+ req_ctx->mlli_params.mlli_virt_addr) {
+ dma_pool_free(req_ctx->mlli_params.curr_pool,
+ req_ctx->mlli_params.mlli_virt_addr,
+ req_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+
+ if (src != dst) {
+ dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+ }
+}
+
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst, gfp_t flags)
+{
+ struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
+ struct mlli_params *mlli_params = &req_ctx->mlli_params;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct buffer_array sg_data;
+ u32 dummy = 0;
+ int rc = 0;
+ u32 mapped_nents = 0;
+
+ req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+
+ /* Map IV buffer */
+ if (ivsize) {
+ dump_byte_array("iv", (u8 *)info, ivsize);
+ req_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, (void *)info,
+ ivsize,
+ req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
+ dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ ivsize, info);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
+ } else {
+ req_ctx->gen_ctx.iv_dma_addr = 0;
+ }
+
+ /* Map the src SGL */
+ rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ if (rc) {
+ rc = -ENOMEM;
+ goto cipher_exit;
+ }
+ if (mapped_nents > 1)
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+ if (src == dst) {
+ /* Handle inplace operation */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ req_ctx->out_nents = 0;
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
+ }
+ } else {
+ /* Map the dst sg */
+ if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
+ rc = -ENOMEM;
+ goto cipher_exit;
+ }
+ if (mapped_nents > 1)
+ req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
+ cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+ nbytes, 0, true,
+ &req_ctx->out_mlli_nents);
+ }
+ }
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
+ goto cipher_exit;
+ }
+
+ dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
+ cc_dma_buf_type(req_ctx->dma_buf_type));
+
+ return 0;
+
+cipher_exit:
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ return rc;
+}
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = areq_ctx->hw_iv_size;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+ u32 dummy;
+ bool chained;
+ u32 size_to_unmap = 0;
+
+ if (areq_ctx->mac_buf_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
+ MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ if (areq_ctx->hkey_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
+ AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ if (areq_ctx->gcm_block_len_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ if (areq_ctx->gcm_iv_inc1_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ if (areq_ctx->gcm_iv_inc2_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+ }
+
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ if (areq_ctx->ccm_iv0_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
+ }
+ if (areq_ctx->gen_ctx.iv_dma_addr) {
+ dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
+ hw_iv_size, DMA_BIDIRECTIONAL);
+ }
+
+	/* In case a pool was set, a table was
+	 * allocated and should be released
+	 */
+ if (areq_ctx->mlli_params.curr_pool) {
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
+ dma_pool_free(areq_ctx->mlli_params.curr_pool,
+ areq_ctx->mlli_params.mlli_virt_addr,
+ areq_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+ sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
+ req->assoclen, req->cryptlen);
+ size_to_unmap = req->assoclen + req->cryptlen;
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_to_unmap += areq_ctx->req_authsize;
+ if (areq_ctx->is_gcm4543)
+ size_to_unmap += crypto_aead_ivsize(tfm);
+
+ dma_unmap_sg(dev, req->src,
+ cc_get_sgl_nents(dev, req->src, size_to_unmap,
+ &dummy, &chained),
+ DMA_BIDIRECTIONAL);
+ if (req->src != req->dst) {
+ dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+ sg_virt(req->dst));
+ dma_unmap_sg(dev, req->dst,
+ cc_get_sgl_nents(dev, req->dst, size_to_unmap,
+ &dummy, &chained),
+ DMA_BIDIRECTIONAL);
+ }
+ if (drvdata->coherent &&
+ areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->src == req->dst) {
+		/* copy back mac from the temporary location to deal with
+		 * possible data memory overwrite caused by a cache coherence
+		 * problem.
+		 */
+ cc_copy_mac(dev, req, CC_SG_FROM_BUF);
+ }
+}
+
+static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
+ unsigned int sgl_nents, unsigned int authsize,
+ u32 last_entry_data_size,
+ bool *is_icv_fragmented)
+{
+ unsigned int icv_max_size = 0;
+ unsigned int icv_required_size = authsize > last_entry_data_size ?
+ (authsize - last_entry_data_size) :
+ authsize;
+ unsigned int nents;
+ unsigned int i;
+
+ if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
+ *is_icv_fragmented = false;
+ return 0;
+ }
+
+ for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
+ if (!sgl)
+ break;
+ sgl = sg_next(sgl);
+ }
+
+ if (sgl)
+ icv_max_size = sgl->length;
+
+ if (last_entry_data_size > authsize) {
+ /* ICV attached to data in last entry (not fragmented!) */
+ nents = 0;
+ *is_icv_fragmented = false;
+ } else if (last_entry_data_size == authsize) {
+ /* ICV placed in whole last entry (not fragmented!) */
+ nents = 1;
+ *is_icv_fragmented = false;
+ } else if (icv_max_size > icv_required_size) {
+ nents = 1;
+ *is_icv_fragmented = true;
+ } else if (icv_max_size == icv_required_size) {
+ nents = 2;
+ *is_icv_fragmented = true;
+ } else {
+ dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
+ MAX_ICV_NENTS_SUPPORTED);
+ nents = -1; /*unsupported*/
+ }
+ dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
+ (*is_icv_fragmented ? "true" : "false"), nents);
+
+ return nents;
+}
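+
+/*
+ * Worked example (illustrative; not part of the original driver code): with
+ * authsize = 16, a last SG entry holding 20 data bytes fully contains the
+ * ICV, so cc_get_aead_icv_nents() reports it as not fragmented (0 extra
+ * entries). If the last entry holds only 4 bytes, the remaining 12 ICV
+ * bytes live in the preceding entry and the ICV is reported as fragmented.
+ */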
+
+static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = areq_ctx->hw_iv_size;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+
+ if (!req->iv) {
+ areq_ctx->gen_ctx.iv_dma_addr = 0;
+ goto chain_iv_exit;
+ }
+
+ areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
+ hw_iv_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
+ dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ hw_iv_size, req->iv);
+ rc = -ENOMEM;
+ goto chain_iv_exit;
+ }
+
+ dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
+ // TODO: what about CTR?? ask Ron
+ if (do_chain && areq_ctx->plaintext_authenticate_only) {
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
+ unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
+ /* Chain to given list */
+ cc_add_buffer_entry(dev, sg_data,
+ (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
+ iv_size_to_authenc, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+ }
+
+chain_iv_exit:
+ return rc;
+}
+
+static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = 0;
+ u32 mapped_nents = 0;
+ struct scatterlist *current_sg = req->src;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int sg_index = 0;
+ u32 size_of_assoc = req->assoclen;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (areq_ctx->is_gcm4543)
+ size_of_assoc += crypto_aead_ivsize(tfm);
+
+ if (!sg_data) {
+ rc = -EINVAL;
+ goto chain_assoc_exit;
+ }
+
+ if (req->assoclen == 0) {
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
+ areq_ctx->assoc.nents = 0;
+ areq_ctx->assoc.mlli_nents = 0;
+ dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
+ goto chain_assoc_exit;
+ }
+
+ //iterate over the sgl to see how many entries are for associated data
+	//it is assumed that if we reach here, the sgl is already mapped
+ sg_index = current_sg->length;
+ //the first entry in the scatter list contains all the associated data
+ if (sg_index > size_of_assoc) {
+ mapped_nents++;
+ } else {
+ while (sg_index <= size_of_assoc) {
+ current_sg = sg_next(current_sg);
+			/* if we have reached the end of the sgl, then this
+			 * is unexpected
+			 */
+ if (!current_sg) {
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
+ }
+ sg_index += current_sg->length;
+ mapped_nents++;
+ }
+ }
+ if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+ areq_ctx->assoc.nents = mapped_nents;
+
+ /* in CCM case we have additional entry for
+ * ccm header configurations
+ */
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
+			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
+ (areq_ctx->assoc.nents + 1),
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ rc = -ENOMEM;
+ goto chain_assoc_exit;
+ }
+ }
+
+ if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
+ else
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+
+ if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
+ dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
+ cc_dma_buf_type(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
+ req->assoclen, 0, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
+ }
+
+chain_assoc_exit:
+ return rc;
+}
+
+static void cc_prepare_aead_data_dlli(struct aead_request *req,
+ u32 *src_last_bytes, u32 *dst_last_bytes)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+
+ areq_ctx->is_icv_fragmented = false;
+ if (req->src == req->dst) {
+ /*INPLACE*/
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
+ (*src_last_bytes - authsize);
+ } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ /*NON-INPLACE and DECRYPT*/
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
+ (*src_last_bytes - authsize);
+ } else {
+ /*NON-INPLACE and ENCRYPT*/
+ areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
+ (*dst_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
+ (*dst_last_bytes - authsize);
+ }
+}
+
+static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ u32 *src_last_bytes, u32 *dst_last_bytes,
+ bool is_last_table)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+ int rc = 0, icv_nents;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct scatterlist *sg;
+
+ if (req->src == req->dst) {
+ /*INPLACE*/
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
+ rc = -ENOTSUPP;
+ goto prepare_data_mlli_exit;
+ }
+
+ if (areq_ctx->is_icv_fragmented) {
+ /* Backup happens only when ICV is fragmented, ICV
+ * verification is made by CPU compare in order to
+ * simplify MAC verification upon request completion
+ */
+ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+				/* On coherent platforms (e.g. ACP) the ICV
+				 * was already copied for any INPLACE-DECRYPT
+				 * operation, so the copy is skipped here.
+				 */
+ if (!drvdata->coherent)
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
+ areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+ } else {
+ areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
+ areq_ctx->icv_dma_addr =
+ areq_ctx->mac_buf_dma_addr;
+ }
+ } else { /* Contig. ICV */
+ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
+			/* Should handle the case where the sg is not contiguous. */
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
+ (*src_last_bytes - authsize);
+ }
+
+ } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ /*NON-INPLACE and DECRYPT*/
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
+ rc = -ENOTSUPP;
+ goto prepare_data_mlli_exit;
+ }
+
+ /* Backup happens only when ICV is fragmented, ICV
+ * verification is made by CPU compare in order to simplify
+ * MAC verification upon request completion
+ */
+ if (areq_ctx->is_icv_fragmented) {
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+ areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+
+ } else { /* Contig. ICV */
+ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
+			/* Should handle the case where the sg is not contiguous. */
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
+ (*src_last_bytes - authsize);
+ }
+
+ } else {
+ /*NON-INPLACE and ENCRYPT*/
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
+ areq_ctx->dst.nents,
+ authsize, *dst_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (icv_nents < 0) {
+ rc = -ENOTSUPP;
+ goto prepare_data_mlli_exit;
+ }
+
+ if (!areq_ctx->is_icv_fragmented) {
+ sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
+ /* Contig. ICV */
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) +
+ (*dst_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(sg) +
+ (*dst_last_bytes - authsize);
+ } else {
+ areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
+ areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
+ }
+ }
+
+prepare_data_mlli_exit:
+ return rc;
+}
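+
+/*
+ * Note added for clarity (not in the original patch): in the MLLI case above
+ * the ICV location is resolved as follows - a contiguous ICV is addressed
+ * directly inside the last data SG entry, while a fragmented ICV is
+ * redirected to the driver-owned mac_buf (encrypt) or to backup_mac after a
+ * CPU copy (decrypt), so MAC verification can be done by a simple memory
+ * compare.
+ */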
+
+static int cc_aead_chain_data(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last_table, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(drvdata);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+ unsigned int src_last_bytes = 0, dst_last_bytes = 0;
+ int rc = 0;
+ u32 src_mapped_nents = 0, dst_mapped_nents = 0;
+ u32 offset = 0;
+ /* non-inplace mode */
+ unsigned int size_for_map = req->assoclen + req->cryptlen;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ u32 sg_index = 0;
+ bool chained = false;
+ bool is_gcm4543 = areq_ctx->is_gcm4543;
+ u32 size_to_skip = req->assoclen;
+
+ if (is_gcm4543)
+ size_to_skip += crypto_aead_ivsize(tfm);
+
+ offset = size_to_skip;
+
+ if (!sg_data)
+ return -EINVAL;
+
+ areq_ctx->src_sgl = req->src;
+ areq_ctx->dst_sgl = req->dst;
+
+ if (is_gcm4543)
+ size_for_map += crypto_aead_ivsize(tfm);
+
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ authsize : 0;
+ src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
+ &src_last_bytes, &chained);
+ sg_index = areq_ctx->src_sgl->length;
+ //check where the data starts
+ while (sg_index <= size_to_skip) {
+ offset -= areq_ctx->src_sgl->length;
+ areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
+		//if we have reached the end of the sgl, then this is unexpected
+ if (!areq_ctx->src_sgl) {
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
+ }
+ sg_index += areq_ctx->src_sgl->length;
+ src_mapped_nents--;
+ }
+ if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+
+ areq_ctx->src.nents = src_mapped_nents;
+
+ areq_ctx->src_offset = offset;
+
+ if (req->src != req->dst) {
+ size_for_map = req->assoclen + req->cryptlen;
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ authsize : 0;
+ if (is_gcm4543)
+ size_for_map += crypto_aead_ivsize(tfm);
+
+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+ &areq_ctx->dst.nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+ &dst_mapped_nents);
+ if (rc) {
+ rc = -ENOMEM;
+ goto chain_data_exit;
+ }
+ }
+
+ dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
+ &dst_last_bytes, &chained);
+ sg_index = areq_ctx->dst_sgl->length;
+ offset = size_to_skip;
+
+ //check where the data starts
+ while (sg_index <= size_to_skip) {
+ offset -= areq_ctx->dst_sgl->length;
+ areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
+		//if we have reached the end of the sgl, then this is unexpected
+ if (!areq_ctx->dst_sgl) {
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
+ }
+ sg_index += areq_ctx->dst_sgl->length;
+ dst_mapped_nents--;
+ }
+ if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+ areq_ctx->dst.nents = dst_mapped_nents;
+ areq_ctx->dst_offset = offset;
+ if (src_mapped_nents > 1 ||
+ dst_mapped_nents > 1 ||
+ do_chain) {
+ areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
+ rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes,
+ &dst_last_bytes, is_last_table);
+ } else {
+ areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
+ cc_prepare_aead_data_dlli(req, &src_last_bytes,
+ &dst_last_bytes);
+ }
+
+chain_data_exit:
+ return rc;
+}
+
+static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
+ struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ u32 curr_mlli_size = 0;
+
+ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
+ areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
+ curr_mlli_size = areq_ctx->assoc.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ }
+
+ if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
+ /*Inplace case dst nents equal to src nents*/
+ if (req->src == req->dst) {
+ areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
+ areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
+ if (!areq_ctx->is_single_pass)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->src.mlli_nents;
+ } else {
+ if (areq_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_DECRYPT) {
+ areq_ctx->src.sram_addr =
+ drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->dst.sram_addr =
+ areq_ctx->src.sram_addr +
+ areq_ctx->src.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ if (!areq_ctx->is_single_pass)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->src.mlli_nents;
+ } else {
+ areq_ctx->dst.sram_addr =
+ drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->src.sram_addr =
+ areq_ctx->dst.sram_addr +
+ areq_ctx->dst.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ if (!areq_ctx->is_single_pass)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->dst.mlli_nents;
+ }
+ }
+ }
+}
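+
+/*
+ * Worked layout example (illustrative; not part of the original driver
+ * code): for a non-inplace DECRYPT with MLLI assoc and data, the tables are
+ * laid out in SRAM back to back - assoc at mlli_sram_addr, src right after
+ * it, and dst after src. For a non-inplace ENCRYPT the dst table precedes
+ * the src table, matching the double-pass ordering described in
+ * cc_map_aead_request() below.
+ */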
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct buffer_array sg_data;
+ unsigned int authsize = areq_ctx->req_authsize;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ int rc = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ bool is_gcm4543 = areq_ctx->is_gcm4543;
+ dma_addr_t dma_addr;
+ u32 mapped_nents = 0;
+ u32 dummy = 0; /*used for the assoc data fragments */
+ u32 size_to_map = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+
+	/* copy mac to a temporary location to deal with possible
+	 * data memory overwrite caused by a cache coherence problem.
+	 */
+ if (drvdata->coherent &&
+ areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->src == req->dst)
+ cc_copy_mac(dev, req, CC_SG_TO_BUF);
+
+	/* calculate the size for cipher; on decrypt, remove the ICV */
+ areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - authsize);
+
+ dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ MAX_MAC_SIZE, areq_ctx->mac_buf);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->mac_buf_dma_addr = dma_addr;
+
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+
+ dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+			dev_err(dev, "Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, addr);
+ areq_ctx->ccm_iv0_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->ccm_iv0_dma_addr = dma_addr;
+
+ if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+ &sg_data, req->assoclen)) {
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ }
+
+ if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, areq_ctx->hkey);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->hkey_dma_addr = dma_addr;
+
+ dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->gcm_block_len_dma_addr = dma_addr;
+
+ dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
+ areq_ctx->gcm_iv_inc1_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
+
+ dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
+ areq_ctx->gcm_iv_inc2_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
+ }
+
+ size_to_map = req->cryptlen + req->assoclen;
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ size_to_map += authsize;
+
+ if (is_gcm4543)
+ size_to_map += crypto_aead_ivsize(tfm);
+ rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+ &areq_ctx->src.nents,
+ (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
+ LLI_MAX_NUM_OF_DATA_ENTRIES),
+ &dummy, &mapped_nents);
+ if (rc) {
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+
+ if (areq_ctx->is_single_pass) {
+ /*
+ * Create MLLI table for:
+ * (1) Assoc. data
+ * (2) Src/Dst SGLs
+		 * Note: IV is a contiguous buffer (not an SGL)
+ */
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
+ if (rc)
+ goto aead_map_failure;
+ } else { /* DOUBLE-PASS flow */
+ /*
+ * Prepare MLLI table(s) in this order:
+ *
+ * If ENCRYPT/DECRYPT (inplace):
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for src/dst (inplace operation)
+ *
+ * If ENCRYPT (non-inplace)
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for dst
+ * (4) MLLI for src
+ *
+ * If DECRYPT (non-inplace)
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for src
+ * (4) MLLI for dst
+ */
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
+ if (rc)
+ goto aead_map_failure;
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
+ if (rc)
+ goto aead_map_failure;
+ }
+
+	/* MLLI support - start building the MLLI according to the above
+	 * results
+	 */
+ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+ if (rc)
+ goto aead_map_failure;
+
+ cc_update_aead_mlli_nents(drvdata, req);
+ dev_dbg(dev, "assoc params mn %d\n",
+ areq_ctx->assoc.mlli_nents);
+ dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
+ dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
+ }
+ return 0;
+
+aead_map_failure:
+ cc_unmap_aead_request(dev, req);
+ return rc;
+}
+
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ struct buffer_array sg_data;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ u32 dummy = 0;
+ u32 mapped_nents = 0;
+
+ dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+ /* Init the type of the dma buffer */
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+ areq_ctx->in_nents = 0;
+
+ if (nbytes == 0 && *curr_buff_cnt == 0) {
+ /* nothing to do */
+ return 0;
+ }
+
+	/* TODO: copy the data here if the buffer is large enough for the operation */
+ /* map the previous buffer */
+ if (*curr_buff_cnt) {
+ if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data)) {
+ return -ENOMEM;
+ }
+ }
+
+ if (src && nbytes > 0 && do_update) {
+ if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
+ goto unmap_curr_buff;
+ }
+ if (src && mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ memcpy(areq_ctx->buff_sg, src,
+ sizeof(struct scatterlist));
+ areq_ctx->buff_sg->length = nbytes;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ } else {
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+ }
+ }
+
+ /*build mlli */
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ /* add the src data to the sg_data */
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+ 0, true, &areq_ctx->mlli_nents);
+ if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ goto fail_unmap_din;
+ }
+ /* change the buffer index for the unmap function */
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
+ dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
+ cc_dma_buf_type(areq_ctx->data_dma_buf_type));
+ return 0;
+
+fail_unmap_din:
+ dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+ if (*curr_buff_cnt)
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u8 *curr_buff = cc_hash_buf(areq_ctx);
+ u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+ u8 *next_buff = cc_next_buf(areq_ctx);
+ u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ unsigned int update_data_len;
+ u32 total_in_len = nbytes + *curr_buff_cnt;
+ struct buffer_array sg_data;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ unsigned int swap_index = 0;
+ u32 dummy = 0;
+ u32 mapped_nents = 0;
+
+ dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+ /* Init the type of the dma buffer */
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+ mlli_params->curr_pool = NULL;
+ areq_ctx->curr_sg = NULL;
+ sg_data.num_of_buffers = 0;
+ areq_ctx->in_nents = 0;
+
+ if (total_in_len < block_size) {
+ dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+ curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
+ areq_ctx->in_nents =
+ cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+ sg_copy_to_buffer(src, areq_ctx->in_nents,
+ &curr_buff[*curr_buff_cnt], nbytes);
+ *curr_buff_cnt += nbytes;
+ return 1;
+ }
+
+ /* Calculate the residue size*/
+ *next_buff_cnt = total_in_len & (block_size - 1);
+ /* update data len */
+ update_data_len = total_in_len - *next_buff_cnt;
+
+ dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
+ *next_buff_cnt, update_data_len);
+
+ /* Copy the new residue to next buffer */
+ if (*next_buff_cnt) {
+ dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+ next_buff, (update_data_len - *curr_buff_cnt),
+ *next_buff_cnt);
+ cc_copy_sg_portion(dev, next_buff, src,
+ (update_data_len - *curr_buff_cnt),
+ nbytes, CC_SG_TO_BUF);
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if (*curr_buff_cnt) {
+ if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+ &sg_data)) {
+ return -ENOMEM;
+ }
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if (update_data_len > *curr_buff_cnt) {
+ if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents)) {
+ goto unmap_curr_buff;
+ }
+ if (mapped_nents == 1 &&
+ areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ /* only one entry in the SG and no previous data */
+ memcpy(areq_ctx->buff_sg, src,
+ sizeof(struct scatterlist));
+ areq_ctx->buff_sg->length = update_data_len;
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ } else {
+ areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+ }
+ }
+
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ /* add the src data to the sg_data */
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+ (update_data_len - *curr_buff_cnt), 0, true,
+ &areq_ctx->mlli_nents);
+ if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+ goto fail_unmap_din;
+ }
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
+
+ return 0;
+
+fail_unmap_din:
+ dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+ if (*curr_buff_cnt)
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
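+
+/*
+ * Illustrative sketch (not part of the original driver code): how the update
+ * path above splits the accumulated input into a block-aligned portion that
+ * is hashed now and a residue kept for the next call. block_size is assumed
+ * to be a power of two, as in the masking done in
+ * cc_map_hash_request_update(). The helper name is hypothetical.
+ */
+static inline void cc_split_update_len(u32 total_in_len, u32 block_size,
+				       u32 *update_len, u32 *residue)
+{
+	*residue = total_in_len & (block_size - 1);
+	*update_len = total_in_len - *residue;
+}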
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ u32 *prev_len = cc_next_buf_cnt(areq_ctx);
+
+	/* In case a pool was set, a table was
+	 * allocated and should be released
+	 */
+ if (areq_ctx->mlli_params.curr_pool) {
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
+ dma_pool_free(areq_ctx->mlli_params.curr_pool,
+ areq_ctx->mlli_params.mlli_virt_addr,
+ areq_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ if (src && areq_ctx->in_nents) {
+ dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
+ dma_unmap_sg(dev, src,
+ areq_ctx->in_nents, DMA_TO_DEVICE);
+ }
+
+ if (*prev_len) {
+ dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+ sg_virt(areq_ctx->buff_sg),
+ &sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_len(areq_ctx->buff_sg));
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+ if (!do_revert) {
+ /* clean the previous data length for update
+ * operation
+ */
+ *prev_len = 0;
+ } else {
+ areq_ctx->buff_index ^= 1;
+ }
+ }
+}
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
+{
+ struct buff_mgr_handle *buff_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
+ if (!buff_mgr_handle)
+ return -ENOMEM;
+
+ drvdata->buff_mgr_handle = buff_mgr_handle;
+
+ buff_mgr_handle->mlli_buffs_pool =
+ dma_pool_create("dx_single_mlli_tables", dev,
+ MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
+ LLI_ENTRY_BYTE_SIZE,
+ MLLI_TABLE_MIN_ALIGNMENT, 0);
+
+ if (!buff_mgr_handle->mlli_buffs_pool)
+ goto error;
+
+ return 0;
+
+error:
+ cc_buffer_mgr_fini(drvdata);
+ return -ENOMEM;
+}
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
+{
+ struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
+
+ if (buff_mgr_handle) {
+ dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
+ kfree(drvdata->buff_mgr_handle);
+ drvdata->buff_mgr_handle = NULL;
+ }
+ return 0;
+}
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
new file mode 100644
index 000000000000..3ec4b4db5247
--- /dev/null
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_buffer_mgr.h
+ * Buffer Manager
+ */
+
+#ifndef __CC_BUFFER_MGR_H__
+#define __CC_BUFFER_MGR_H__
+
+#include <crypto/algapi.h>
+
+#include "cc_driver.h"
+
+enum cc_req_dma_buf_type {
+ CC_DMA_BUF_NULL = 0,
+ CC_DMA_BUF_DLLI,
+ CC_DMA_BUF_MLLI
+};
+
+enum cc_sg_cpy_direct {
+ CC_SG_TO_BUF = 0,
+ CC_SG_FROM_BUF = 1
+};
+
+struct cc_mlli {
+ cc_sram_addr_t sram_addr;
+ unsigned int nents; //sg nents
+ unsigned int mlli_nents; //mlli nents might be different than the above
+};
+
+struct mlli_params {
+ struct dma_pool *curr_pool;
+ u8 *mlli_virt_addr;
+ dma_addr_t mlli_dma_addr;
+ u32 mlli_len;
+};
+
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
+
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
+
+int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst, gfp_t flags);
+
+void cc_unmap_cipher_request(struct device *dev, void *ctx, unsigned int ivsize,
+ struct scatterlist *src, struct scatterlist *dst);
+
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
+
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
+
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update, gfp_t flags);
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size, gfp_t flags);
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert);
+
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
+
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
+
+#endif /*__CC_BUFFER_MGR_H__*/
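
For illustration only (not part of the patch): a minimal sketch of how a caller such as the hash code might pair the mapping and unmapping helpers declared above. The function name and error handling are assumptions; the real driver builds and queues HW descriptors between the two calls.

static int example_hash_final(struct cc_drvdata *drvdata, void *req_ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      gfp_t flags)
{
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	/* DMA-map the request data (and any cached partial block) */
	rc = cc_map_hash_request_final(drvdata, req_ctx, src, nbytes,
				       true, flags);
	if (rc)
		return rc;

	/* ...build the descriptor sequence and send it to the HW queue... */

	/* Release the mappings when done. do_revert=false also clears the
	 * cached-buffer length; do_revert=true instead restores the
	 * ping-pong buffer index (used on error paths).
	 */
	cc_unmap_hash_request(dev, req_ctx, src, false);
	return 0;
}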
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
new file mode 100644
index 000000000000..df98f7afe645
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -0,0 +1,1150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
+
+#include "cc_driver.h"
+#include "cc_lli_defs.h"
+#include "cc_buffer_mgr.h"
+#include "cc_cipher.h"
+#include "cc_request_mgr.h"
+
+#define MAX_ABLKCIPHER_SEQ_LEN 6
+
+#define template_skcipher template_u.skcipher
+
+#define CC_MIN_AES_XTS_SIZE 0x10
+#define CC_MAX_AES_XTS_SIZE 0x2000
+struct cc_cipher_handle {
+ struct list_head alg_list;
+};
+
+struct cc_user_key_info {
+ u8 *key;
+ dma_addr_t key_dma_addr;
+};
+
+struct cc_hw_key_info {
+ enum cc_hw_crypto_key key1_slot;
+ enum cc_hw_crypto_key key2_slot;
+};
+
+struct cc_cipher_ctx {
+ struct cc_drvdata *drvdata;
+ int keylen;
+ int key_round_number;
+ int cipher_mode;
+ int flow_mode;
+ unsigned int flags;
+ struct cc_user_key_info user;
+ struct cc_hw_key_info hw;
+ struct crypto_shash *shash_tfm;
+};
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
+
+static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ switch (size) {
+ case CC_AES_128_BIT_KEY_SIZE:
+ case CC_AES_192_BIT_KEY_SIZE:
+ if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+ ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+ ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
+ return 0;
+ break;
+ case CC_AES_256_BIT_KEY_SIZE:
+ return 0;
+ case (CC_AES_192_BIT_KEY_SIZE * 2):
+ case (CC_AES_256_BIT_KEY_SIZE * 2):
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ break;
+ case S_DIN_to_DES:
+ if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+ unsigned int size)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_XTS:
+ if (size >= CC_MIN_AES_XTS_SIZE &&
+ size <= CC_MAX_AES_XTS_SIZE &&
+ IS_ALIGNED(size, AES_BLOCK_SIZE))
+ return 0;
+ break;
+ case DRV_CIPHER_CBC_CTS:
+ if (size >= AES_BLOCK_SIZE)
+ return 0;
+ break;
+ case DRV_CIPHER_OFB:
+ case DRV_CIPHER_CTR:
+ return 0;
+ case DRV_CIPHER_ECB:
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ if (IS_ALIGNED(size, AES_BLOCK_SIZE))
+ return 0;
+ break;
+ default:
+ break;
+ }
+ break;
+ case S_DIN_to_DES:
+ if (IS_ALIGNED(size, DES_BLOCK_SIZE))
+ return 0;
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int cc_cipher_init(struct crypto_tfm *tfm)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+ int rc = 0;
+
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
+ crypto_tfm_alg_name(tfm));
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct cipher_req_ctx));
+
+ ctx_p->cipher_mode = cc_alg->cipher_mode;
+ ctx_p->flow_mode = cc_alg->flow_mode;
+ ctx_p->drvdata = cc_alg->drvdata;
+
+ /* Allocate key buffer, cache line aligned */
+ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
+ if (!ctx_p->user.key)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
+ ctx_p->user.key);
+
+ /* Map key buffer */
+ ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
+ max_key_buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
+ dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+ max_key_buf_size, ctx_p->user.key);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+ max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* Alloc hash tfm for essiv */
+ ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
+ if (IS_ERR(ctx_p->shash_tfm)) {
+ dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+ return PTR_ERR(ctx_p->shash_tfm);
+ }
+ }
+
+ return rc;
+}
+
+static void cc_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+ dev_dbg(dev, "Clearing context @%p for %s\n",
+ crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* Free hash tfm for essiv */
+ crypto_free_shash(ctx_p->shash_tfm);
+ ctx_p->shash_tfm = NULL;
+ }
+
+ /* Unmap key buffer */
+ dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
+ DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
+ &ctx_p->user.key_dma_addr);
+
+ /* Free key buffer in context */
+ kzfree(ctx_p->user.key);
+ dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+}
+
+struct tdes_keys {
+ u8 key1[DES_KEY_SIZE];
+ u8 key2[DES_KEY_SIZE];
+ u8 key3[DES_KEY_SIZE];
+};
+
+static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
+{
+ switch (slot_num) {
+ case 0:
+ return KFDE0_KEY;
+ case 1:
+ return KFDE1_KEY;
+ case 2:
+ return KFDE2_KEY;
+ case 3:
+ return KFDE3_KEY;
+ }
+ return END_OF_KEYS;
+}
+
+static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+
+ dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+ ctx_p, crypto_tfm_alg_name(tfm), keylen);
+ dump_byte_array("key", (u8 *)key, keylen);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ if (validate_keys_sizes(ctx_p, keylen)) {
+ dev_err(dev, "Unsupported key size %d.\n", keylen);
+ crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (cc_is_hw_key(tfm)) {
+ /* setting HW key slots */
+ struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
+
+ if (ctx_p->flow_mode != S_DIN_to_AES) {
+ dev_err(dev, "HW key not supported for non-AES flows\n");
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key1 number (%d)\n",
+ hki->hw_key1);
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ if (hki->hw_key1 == hki->hw_key2) {
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki->hw_key1, hki->hw_key2);
+ return -EINVAL;
+ }
+ ctx_p->hw.key2_slot =
+ hw_key_to_cc_hw_key(hki->hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki->hw_key2);
+ return -EINVAL;
+ }
+ }
+
+ ctx_p->keylen = keylen;
+ dev_dbg(dev, "cc_is_hw_key ret 0");
+
+ return 0;
+ }
+
+ /*
+ * Verify DES weak keys
+ * Note that we're dropping the expanded key since the
+ * HW does the expansion on its own.
+ */
+ if (ctx_p->flow_mode == S_DIN_to_DES) {
+ if (keylen == DES3_EDE_KEY_SIZE &&
+ __des3_ede_setkey(tmp, &tfm->crt_flags, key,
+ DES3_EDE_KEY_SIZE)) {
+ dev_dbg(dev, "weak 3DES key");
+ return -EINVAL;
+ } else if (!des_ekey(tmp, key) &&
+ (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ dev_dbg(dev, "weak DES key");
+ return -EINVAL;
+ }
+ }
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
+ xts_check_key(tfm, key, keylen)) {
+ dev_dbg(dev, "weak XTS key");
+ return -EINVAL;
+ }
+
+ /* STAT_PHASE_1: Copy key to ctx */
+ dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
+ max_key_buf_size, DMA_TO_DEVICE);
+
+ memcpy(ctx_p->user.key, key, keylen);
+ if (keylen == 24)
+ memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* sha256 for key2 - use sw implementation */
+ int key_len = keylen >> 1;
+ int err;
+
+ SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
+ desc->tfm = ctx_p->shash_tfm;
+
+ err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
+ ctx_p->user.key + key_len);
+ if (err) {
+ dev_err(dev, "Failed to hash ESSIV key.\n");
+ return err;
+ }
+ }
+ dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
+ max_key_buf_size, DMA_TO_DEVICE);
+ ctx_p->keylen = keylen;
+
+ dev_dbg(dev, "return safely");
+ return 0;
+}
+
+static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+ unsigned int du_size = nbytes;
+
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+
+ if (cc_alg->data_unit)
+ du_size = cc_alg->data_unit;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Load cipher state */
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
+ NS_BIT);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+ } else {
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
+ }
+ (*seq_size)++;
+ /*FALLTHROUGH*/
+ case DRV_CIPHER_ECB:
+ /* Load key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (flow_mode == S_DIN_to_AES) {
+ if (cc_is_hw_key(tfm)) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, ((key_len == 24) ?
+ AES_MAX_KEY_SIZE :
+ key_len), NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], key_len);
+ } else {
+ /*des*/
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ key_len, NS_BIT);
+ set_key_size_des(&desc[*seq_size], key_len);
+ }
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* Load AES key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_is_hw_key(tfm)) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ (key_len / 2), NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+
+ /* load XEX key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_is_hw_key(tfm)) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key2_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ (key_dma_addr + (key_len / 2)),
+ (key_len / 2), NS_BIT);
+ }
+ set_xex_data_unit_size(&desc[*seq_size], du_size);
+ set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+ (*seq_size)++;
+
+ /* Set state */
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_cipher_data(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes,
+ void *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ unsigned int flow_mode = ctx_p->flow_mode;
+
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ flow_mode = DIN_AES_DOUT;
+ break;
+ case S_DIN_to_DES:
+ flow_mode = DIN_DES_DOUT;
+ break;
+ default:
+ dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
+ return;
+ }
+ /* Process */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(src), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(dst), nbytes);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+ nbytes, NS_BIT);
+ set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+ nbytes, NS_BIT, (!areq ? 0 : 1));
+ if (areq)
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ } else {
+ /* bypass */
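+ /* The MLLI table itself was built in host memory by the buffer
+ * manager; a BYPASS descriptor first copies it into the MLLI slot
+ * in CC SRAM, and only then does the cipher descriptor stream the
+ * data through that table.
+ */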
+ dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
+ &req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[*seq_size], BYPASS);
+ (*seq_size)++;
+
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_MLLI,
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->in_mlli_nents, NS_BIT);
+ if (req_ctx->out_nents == 0) {
+ dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ set_dout_mlli(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->in_mlli_nents, NS_BIT,
+ (!areq ? 0 : 1));
+ } else {
+ dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr +
+ (u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
+ set_dout_mlli(&desc[*seq_size],
+ (ctx_p->drvdata->mlli_sram_addr +
+ (LLI_ENTRY_BYTE_SIZE *
+ req_ctx->in_mlli_nents)),
+ req_ctx->out_mlli_nents, NS_BIT,
+ (!areq ? 0 : 1));
+ }
+ if (areq)
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ }
+}
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
+{
+ struct skcipher_request *req = (struct skcipher_request *)cc_req;
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ kzfree(req_ctx->iv);
+
+ /*
+ * The crypto API expects us to set the req->iv to the last
+ * ciphertext block. For encrypt, simply copy from the result.
+ * For decrypt, we must copy from a saved buffer since this
+ * could be an in-place decryption operation and the src is
+ * lost by this point.
+ */
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ memcpy(req->iv, req_ctx->backup_info, ivsize);
+ kzfree(req_ctx->backup_info);
+ } else if (!err) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ (req->cryptlen - ivsize),
+ ivsize, 0);
+ }
+
+ skcipher_request_complete(req, err);
+}
+
+static int cc_cipher_process(struct skcipher_request *req,
+ enum drv_crypto_direction direction)
+{
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->cryptlen;
+ void *iv = req->iv;
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
+ struct cc_crypto_req cc_req = {};
+ int rc, cts_restore_flag = 0;
+ unsigned int seq_len = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "%s req=%p iv=%p nbytes=%d\n",
+ ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ "Encrypt" : "Decrypt"), req, iv, nbytes);
+
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ /* TODO: check data length according to mode */
+ if (validate_data_size(ctx_p, nbytes)) {
+ dev_err(dev, "Unsupported data size %d.\n", nbytes);
+ crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ rc = -EINVAL;
+ goto exit_process;
+ }
+ if (nbytes == 0) {
+ /* No data to process is valid */
+ rc = 0;
+ goto exit_process;
+ }
+
+ /* The IV we are handed may be allocated from the stack so
+ * we must copy it to a DMAable buffer before use.
+ */
+ req_ctx->iv = kmemdup(iv, ivsize, flags);
+ if (!req_ctx->iv) {
+ rc = -ENOMEM;
+ goto exit_process;
+ }
+
+ /*For CTS in case of data size aligned to 16 use CBC mode*/
+ if (((nbytes % AES_BLOCK_SIZE) == 0) &&
+ ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
+ ctx_p->cipher_mode = DRV_CIPHER_CBC;
+ cts_restore_flag = 1;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_cipher_complete;
+ cc_req.user_arg = (void *)req;
+
+#ifdef ENABLE_CYCLE_COUNT
+ cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
+
+#endif
+
+ /* Setup request context */
+ req_ctx->gen_ctx.op_type = direction;
+
+ /* STAT_PHASE_1: Map buffers */
+
+ rc = cc_map_cipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+ req_ctx->iv, src, dst, flags);
+ if (rc) {
+ dev_err(dev, "map_request() failed\n");
+ goto exit_process;
+ }
+
+ /* STAT_PHASE_2: Create sequence */
+
+ /* Setup processing */
+ cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Data processing */
+ cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
+ &seq_len);
+
+ /* do we need to generate IV? */
+ if (req_ctx->is_giv) {
+ cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
+ cc_req.ivgen_dma_addr_len = 1;
+ /* set the IV size (8/16 B long)*/
+ cc_req.ivgen_size = ivsize;
+ }
+
+ /* STAT_PHASE_3: Lock HW and push sequence */
+
+ rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
+ &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ /* Failed to send the request or request completed
+ * synchronously
+ */
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ }
+
+exit_process:
+ if (cts_restore_flag)
+ ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
+
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ kzfree(req_ctx->backup_info);
+ kzfree(req_ctx->iv);
+ }
+
+ return rc;
+}
+
+static int cc_cipher_encrypt(struct skcipher_request *req)
+{
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+
+ req_ctx->is_giv = false;
+ req_ctx->backup_info = NULL;
+
+ return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+}
+
+static int cc_cipher_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ /*
+ * Allocate and save the last IV sized bytes of the source, which will
+ * be lost in case of in-place decryption and might be needed for CTS.
+ */
+ req_ctx->backup_info = kmalloc(ivsize, flags);
+ if (!req_ctx->backup_info)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
+ (req->cryptlen - ivsize), ivsize, 0);
+ req_ctx->is_giv = false;
+
+ return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+}
+
+/* Block cipher alg */
+static const struct cc_alg_template skcipher_algs[] = {
+ {
+ .name = "xts(aes)",
+ .driver_name = "xts-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "xts512(aes)",
+ .driver_name = "xts-aes-du512-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 512,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "xts4096(aes)",
+ .driver_name = "xts-aes-du4096-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 4096,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "essiv(aes)",
+ .driver_name = "essiv-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "essiv512(aes)",
+ .driver_name = "essiv-aes-du512-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 512,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "essiv4096(aes)",
+ .driver_name = "essiv-aes-du4096-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 4096,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "bitlocker(aes)",
+ .driver_name = "bitlocker-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "bitlocker512(aes)",
+ .driver_name = "bitlocker-aes-du512-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 512,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "bitlocker4096(aes)",
+ .driver_name = "bitlocker-aes-du4096-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .data_unit = 4096,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "ecb(aes)",
+ .driver_name = "ecb-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "ofb(aes)",
+ .driver_name = "ofb-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_OFB,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "cts1(cbc(aes))",
+ .driver_name = "cts1-cbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC_CTS,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-ccree",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "ecb(des3_ede)",
+ .driver_name = "ecb-3des-ccree",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-ccree",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "ecb(des)",
+ .driver_name = "ecb-des-ccree",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_DES,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+};
+
+static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
+ struct device *dev)
+{
+ struct cc_crypto_alg *t_alg;
+ struct skcipher_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ alg = &t_alg->skcipher_alg;
+
+ memcpy(alg, &tmpl->template_skcipher, sizeof(*alg));
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->driver_name);
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CC_CRA_PRIO;
+ alg->base.cra_blocksize = tmpl->blocksize;
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_ctxsize = sizeof(struct cc_cipher_ctx);
+
+ alg->base.cra_init = cc_cipher_init;
+ alg->base.cra_exit = cc_cipher_exit;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_TYPE_SKCIPHER;
+
+ t_alg->cipher_mode = tmpl->cipher_mode;
+ t_alg->flow_mode = tmpl->flow_mode;
+ t_alg->data_unit = tmpl->data_unit;
+
+ return t_alg;
+}
+
+int cc_cipher_free(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_alg *t_alg, *n;
+ struct cc_cipher_handle *cipher_handle = drvdata->cipher_handle;
+
+ if (cipher_handle) {
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list,
+ entry) {
+ crypto_unregister_skcipher(&t_alg->skcipher_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+ kfree(cipher_handle);
+ drvdata->cipher_handle = NULL;
+ }
+ return 0;
+}
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_cipher_handle *cipher_handle;
+ struct cc_crypto_alg *t_alg;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = -ENOMEM;
+ int alg;
+
+ cipher_handle = kmalloc(sizeof(*cipher_handle), GFP_KERNEL);
+ if (!cipher_handle)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cipher_handle->alg_list);
+ drvdata->cipher_handle = cipher_handle;
+
+ /* Linux crypto */
+ dev_dbg(dev, "Number of algorithms = %zu\n",
+ ARRAY_SIZE(skcipher_algs));
+ for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
+ if (skcipher_algs[alg].min_hw_rev > drvdata->hw_rev)
+ continue;
+
+ dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
+ t_alg = cc_create_alg(&skcipher_algs[alg], dev);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ skcipher_algs[alg].driver_name);
+ goto fail0;
+ }
+ t_alg->drvdata = drvdata;
+
+ dev_dbg(dev, "registering %s\n",
+ skcipher_algs[alg].driver_name);
+ rc = crypto_register_skcipher(&t_alg->skcipher_alg);
+ dev_dbg(dev, "%s alg registration rc = %x\n",
+ t_alg->skcipher_alg.base.cra_driver_name, rc);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ t_alg->skcipher_alg.base.cra_driver_name);
+ kfree(t_alg);
+ goto fail0;
+ } else {
+ list_add_tail(&t_alg->entry,
+ &cipher_handle->alg_list);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->skcipher_alg.base.cra_driver_name);
+ }
+ }
+ return 0;
+
+fail0:
+ cc_cipher_free(drvdata);
+ return rc;
+}
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
new file mode 100644
index 000000000000..2a2a6f46c515
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __CC_CIPHER_H__
+#define __CC_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+
+/* Crypto cipher flags */
+#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)
+
+#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE1 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE2 | \
+ CC_CRYPTO_CIPHER_KEY_KFDE3)
+
+struct cipher_req_ctx {
+ struct async_gen_req_ctx gen_ctx;
+ enum cc_req_dma_buf_type dma_buf_type;
+ u32 in_nents;
+ u32 in_mlli_nents;
+ u32 out_nents;
+ u32 out_mlli_nents;
+ u8 *backup_info; /* saved last src block, used to restore the IV on decrypt */
+ u8 *iv;
+ bool is_giv;
+ struct mlli_params mlli_params;
+};
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
+
+int cc_cipher_free(struct cc_drvdata *drvdata);
+
+struct arm_hw_key_info {
+ int hw_key1;
+ int hw_key2;
+};
+
+/*
+ * This is a stub function that will be replaced when we
+ * implement secure keys
+ */
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+ return false;
+}
+
+#endif /*__CC_CIPHER_H__*/
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
new file mode 100644
index 000000000000..e032544f4e31
--- /dev/null
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef _CC_CRYPTO_CTX_H_
+#define _CC_CRYPTO_CTX_H_
+
+#include <linux/types.h>
+
+#define CC_DRV_DES_IV_SIZE 8
+#define CC_DRV_DES_BLOCK_SIZE 8
+
+#define CC_DRV_DES_ONE_KEY_SIZE 8
+#define CC_DRV_DES_DOUBLE_KEY_SIZE 16
+#define CC_DRV_DES_TRIPLE_KEY_SIZE 24
+#define CC_DRV_DES_KEY_SIZE_MAX CC_DRV_DES_TRIPLE_KEY_SIZE
+
+#define CC_AES_IV_SIZE 16
+#define CC_AES_IV_SIZE_WORDS (CC_AES_IV_SIZE >> 2)
+
+#define CC_AES_BLOCK_SIZE 16
+#define CC_AES_BLOCK_SIZE_WORDS 4
+
+#define CC_AES_128_BIT_KEY_SIZE 16
+#define CC_AES_128_BIT_KEY_SIZE_WORDS (CC_AES_128_BIT_KEY_SIZE >> 2)
+#define CC_AES_192_BIT_KEY_SIZE 24
+#define CC_AES_192_BIT_KEY_SIZE_WORDS (CC_AES_192_BIT_KEY_SIZE >> 2)
+#define CC_AES_256_BIT_KEY_SIZE 32
+#define CC_AES_256_BIT_KEY_SIZE_WORDS (CC_AES_256_BIT_KEY_SIZE >> 2)
+#define CC_AES_KEY_SIZE_MAX CC_AES_256_BIT_KEY_SIZE
+#define CC_AES_KEY_SIZE_WORDS_MAX (CC_AES_KEY_SIZE_MAX >> 2)
+
+#define CC_MD5_DIGEST_SIZE 16
+#define CC_SHA1_DIGEST_SIZE 20
+#define CC_SHA224_DIGEST_SIZE 28
+#define CC_SHA256_DIGEST_SIZE 32
+#define CC_SHA256_DIGEST_SIZE_IN_WORDS 8
+#define CC_SHA384_DIGEST_SIZE 48
+#define CC_SHA512_DIGEST_SIZE 64
+
+#define CC_SHA1_BLOCK_SIZE 64
+#define CC_SHA1_BLOCK_SIZE_IN_WORDS 16
+#define CC_MD5_BLOCK_SIZE 64
+#define CC_MD5_BLOCK_SIZE_IN_WORDS 16
+#define CC_SHA224_BLOCK_SIZE 64
+#define CC_SHA256_BLOCK_SIZE 64
+#define CC_SHA256_BLOCK_SIZE_IN_WORDS 16
+#define CC_SHA1_224_256_BLOCK_SIZE 64
+#define CC_SHA384_BLOCK_SIZE 128
+#define CC_SHA512_BLOCK_SIZE 128
+
+#define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
+#define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/
+
+#define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
+
+#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
+
+enum drv_engine_type {
+ DRV_ENGINE_NULL = 0,
+ DRV_ENGINE_AES = 1,
+ DRV_ENGINE_DES = 2,
+ DRV_ENGINE_HASH = 3,
+ DRV_ENGINE_RC4 = 4,
+ DRV_ENGINE_DOUT = 5,
+ DRV_ENGINE_RESERVE32B = S32_MAX,
+};
+
+enum drv_crypto_alg {
+ DRV_CRYPTO_ALG_NULL = -1,
+ DRV_CRYPTO_ALG_AES = 0,
+ DRV_CRYPTO_ALG_DES = 1,
+ DRV_CRYPTO_ALG_HASH = 2,
+ DRV_CRYPTO_ALG_C2 = 3,
+ DRV_CRYPTO_ALG_HMAC = 4,
+ DRV_CRYPTO_ALG_AEAD = 5,
+ DRV_CRYPTO_ALG_BYPASS = 6,
+ DRV_CRYPTO_ALG_NUM = 7,
+ DRV_CRYPTO_ALG_RESERVE32B = S32_MAX
+};
+
+enum drv_crypto_direction {
+ DRV_CRYPTO_DIRECTION_NULL = -1,
+ DRV_CRYPTO_DIRECTION_ENCRYPT = 0,
+ DRV_CRYPTO_DIRECTION_DECRYPT = 1,
+ DRV_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
+ DRV_CRYPTO_DIRECTION_RESERVE32B = S32_MAX
+};
+
+enum drv_cipher_mode {
+ DRV_CIPHER_NULL_MODE = -1,
+ DRV_CIPHER_ECB = 0,
+ DRV_CIPHER_CBC = 1,
+ DRV_CIPHER_CTR = 2,
+ DRV_CIPHER_CBC_MAC = 3,
+ DRV_CIPHER_XTS = 4,
+ DRV_CIPHER_XCBC_MAC = 5,
+ DRV_CIPHER_OFB = 6,
+ DRV_CIPHER_CMAC = 7,
+ DRV_CIPHER_CCM = 8,
+ DRV_CIPHER_CBC_CTS = 11,
+ DRV_CIPHER_GCTR = 12,
+ DRV_CIPHER_ESSIV = 13,
+ DRV_CIPHER_BITLOCKER = 14,
+ DRV_CIPHER_RESERVE32B = S32_MAX
+};
+
+enum drv_hash_mode {
+ DRV_HASH_NULL = -1,
+ DRV_HASH_SHA1 = 0,
+ DRV_HASH_SHA256 = 1,
+ DRV_HASH_SHA224 = 2,
+ DRV_HASH_SHA512 = 3,
+ DRV_HASH_SHA384 = 4,
+ DRV_HASH_MD5 = 5,
+ DRV_HASH_CBC_MAC = 6,
+ DRV_HASH_XCBC_MAC = 7,
+ DRV_HASH_CMAC = 8,
+ DRV_HASH_MODE_NUM = 9,
+ DRV_HASH_RESERVE32B = S32_MAX
+};
+
+enum drv_hash_hw_mode {
+ DRV_HASH_HW_MD5 = 0,
+ DRV_HASH_HW_SHA1 = 1,
+ DRV_HASH_HW_SHA256 = 2,
+ DRV_HASH_HW_SHA224 = 10,
+ DRV_HASH_HW_SHA512 = 4,
+ DRV_HASH_HW_SHA384 = 12,
+ DRV_HASH_HW_GHASH = 6,
+ DRV_HASH_HW_RESERVE32B = S32_MAX
+};
+
+#endif /* _CC_CRYPTO_CTX_H_ */
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
new file mode 100644
index 000000000000..08f8db489cf0
--- /dev/null
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/stringify.h>
+#include "cc_driver.h"
+#include "cc_crypto_ctx.h"
+#include "cc_debugfs.h"
+
+struct cc_debugfs_ctx {
+ struct dentry *dir;
+};
+
+#define CC_DEBUG_REG(_X) { \
+ .name = __stringify(_X),\
+ .offset = CC_REG(_X) \
+ }
+
+/*
+ * This is a global var for the dentry of the
+ * debugfs ccree/ dir. It is not tied down to
+ * a specific instance of ccree, hence it is
+ * global.
+ */
+static struct dentry *cc_debugfs_dir;
+
+static struct debugfs_reg32 debug_regs[] = {
+ CC_DEBUG_REG(HOST_SIGNATURE),
+ CC_DEBUG_REG(HOST_IRR),
+ CC_DEBUG_REG(HOST_POWER_DOWN_EN),
+ CC_DEBUG_REG(AXIM_MON_ERR),
+ CC_DEBUG_REG(DSCRPTR_QUEUE_CONTENT),
+ CC_DEBUG_REG(HOST_IMR),
+ CC_DEBUG_REG(AXIM_CFG),
+ CC_DEBUG_REG(AXIM_CACHE_PARAMS),
+ CC_DEBUG_REG(HOST_VERSION),
+ CC_DEBUG_REG(GPR_HOST),
+ CC_DEBUG_REG(AXIM_MON_COMP),
+};
+
+int __init cc_debugfs_global_init(void)
+{
+ cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
+
+ return cc_debugfs_dir ? 0 : -ENOMEM;
+}
+
+void __exit cc_debugfs_global_fini(void)
+{
+ debugfs_remove(cc_debugfs_dir);
+}
+
+int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_debugfs_ctx *ctx;
+ struct debugfs_regset32 *regset;
+ struct dentry *file;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = debug_regs;
+ regset->nregs = ARRAY_SIZE(debug_regs);
+ regset->base = drvdata->cc_base;
+
+ ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
+ if (!ctx->dir)
+ return -ENFILE;
+
+ file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
+ if (!file) {
+ debugfs_remove(ctx->dir);
+ return -ENFILE;
+ }
+
+ file = debugfs_create_bool("coherent", 0400, ctx->dir,
+ &drvdata->coherent);
+
+ if (!file) {
+ debugfs_remove_recursive(ctx->dir);
+ return -ENFILE;
+ }
+
+ drvdata->debugfs = ctx;
+
+ return 0;
+}
+
+void cc_debugfs_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;
+
+ debugfs_remove_recursive(ctx->dir);
+}
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
new file mode 100644
index 000000000000..5b5320eca7d2
--- /dev/null
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_DEBUGFS_H__
+#define __CC_DEBUGFS_H__
+
+#ifdef CONFIG_DEBUG_FS
+int cc_debugfs_global_init(void);
+void cc_debugfs_global_fini(void);
+
+int cc_debugfs_init(struct cc_drvdata *drvdata);
+void cc_debugfs_fini(struct cc_drvdata *drvdata);
+
+#else
+
+static inline int cc_debugfs_global_init(void)
+{
+ return 0;
+}
+
+static inline void cc_debugfs_global_fini(void) {}
+
+static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
+{
+ return 0;
+}
+
+static inline void cc_debugfs_fini(struct cc_drvdata *drvdata) {}
+
+#endif
+
+#endif /*__CC_DEBUGFS_H__*/
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
new file mode 100644
index 000000000000..89ce013ae093
--- /dev/null
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/crypto.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_debugfs.h"
+#include "cc_cipher.h"
+#include "cc_aead.h"
+#include "cc_hash.h"
+#include "cc_ivgen.h"
+#include "cc_sram_mgr.h"
+#include "cc_pm.h"
+#include "cc_fips.h"
+
+bool cc_dump_desc;
+module_param_named(dump_desc, cc_dump_desc, bool, 0600);
+MODULE_PARM_DESC(dump_desc, "Dump descriptors to kernel log as debugging aid");
+
+bool cc_dump_bytes;
+module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
+MODULE_PARM_DESC(dump_bytes, "Dump buffers to kernel log as debugging aid");
+
+struct cc_hw_data {
+ char *name;
+ enum cc_hw_rev rev;
+ u32 sig;
+};
+
+/* Hardware revisions defs. */
+
+static const struct cc_hw_data cc712_hw = {
+ .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U
+};
+
+static const struct cc_hw_data cc710_hw = {
+ .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U
+};
+
+static const struct cc_hw_data cc630p_hw = {
+ .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U
+};
+
+static const struct of_device_id arm_ccree_dev_of_match[] = {
+ { .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
+ { .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
+ { .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len)
+{
+ char prefix[64];
+
+ if (!buf)
+ return;
+
+ snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);
+
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
+ len, false);
+}
+
+static irqreturn_t cc_isr(int irq, void *dev_id)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u32 irr;
+ u32 imr;
+
+ /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
+
+ /* read the interrupt status */
+ irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ dev_dbg(dev, "Got IRR=0x%08X\n", irr);
+ if (irr == 0) { /* Probably shared interrupt line */
+ dev_err(dev, "Got interrupt with empty IRR\n");
+ return IRQ_NONE;
+ }
+ imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
+
+ /* clear interrupt - must be before processing events */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);
+
+ drvdata->irq = irr;
+ /* Completion interrupt - most probable */
+ if (irr & CC_COMP_IRQ_MASK) {
+ /* Mask AXI completion interrupt - will be unmasked in
+ * Deferred service handler
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
+ irr &= ~CC_COMP_IRQ_MASK;
+ complete_request(drvdata);
+ }
+#ifdef CONFIG_CRYPTO_FIPS
+ /* TEE FIPS interrupt */
+ if (irr & CC_GPR0_IRQ_MASK) {
+ /* Mask interrupt - will be unmasked in Deferred service
+ * handler
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
+ irr &= ~CC_GPR0_IRQ_MASK;
+ fips_handler(drvdata);
+ }
+#endif
+ /* AXI error interrupt */
+ if (irr & CC_AXI_ERR_IRQ_MASK) {
+ u32 axi_err;
+
+ /* Read the AXI error ID */
+ axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
+ dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
+ axi_err);
+
+ irr &= ~CC_AXI_ERR_IRQ_MASK;
+ }
+
+ if (irr) {
+ dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
+ /* Just warning */
+ }
+
+ return IRQ_HANDLED;
+}
+
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
+{
+ unsigned int val, cache_params;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /* Unmask all AXI interrupt sources in the AXIM_CFG register */
+ val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
+ dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+ cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+
+ /* Clear all pending interrupts */
+ val = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ dev_dbg(dev, "IRR=0x%08X\n", val);
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
+
+ /* Unmask relevant interrupt cause */
+ val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
+
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ val |= CC_GPR0_IRQ_MASK;
+
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val);
+
+ cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
+
+ val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
+
+ if (is_probe)
+ dev_info(dev, "Cache params previous: 0x%08X\n", val);
+
+ cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
+ val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
+
+ if (is_probe)
+ dev_info(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
+ val, cache_params);
+
+ return 0;
+}
+
+static int init_cc_resources(struct platform_device *plat_dev)
+{
+ struct resource *req_mem_cc_regs = NULL;
+ struct cc_drvdata *new_drvdata;
+ struct device *dev = &plat_dev->dev;
+ struct device_node *np = dev->of_node;
+ u32 signature_val;
+ u64 dma_mask;
+ const struct cc_hw_data *hw_rev;
+ const struct of_device_id *dev_id;
+ int rc = 0;
+
+ new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
+ if (!new_drvdata)
+ return -ENOMEM;
+
+ dev_id = of_match_node(arm_ccree_dev_of_match, np);
+ if (!dev_id)
+ return -ENODEV;
+
+ hw_rev = (struct cc_hw_data *)dev_id->data;
+ new_drvdata->hw_rev_name = hw_rev->name;
+ new_drvdata->hw_rev = hw_rev->rev;
+
+ if (hw_rev->rev >= CC_HW_REV_712) {
+ new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
+ new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
+ } else {
+ new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
+ new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
+ }
+
+ platform_set_drvdata(plat_dev, new_drvdata);
+ new_drvdata->plat_dev = plat_dev;
+
+ new_drvdata->clk = of_clk_get(np, 0);
+ new_drvdata->coherent = of_dma_is_coherent(np);
+
+ /* Get device resources */
+ /* First CC registers space */
+ req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ /* Map registers space */
+ new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ if (IS_ERR(new_drvdata->cc_base)) {
+ dev_err(dev, "Failed to ioremap registers");
+ return PTR_ERR(new_drvdata->cc_base);
+ }
+
+ dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
+ req_mem_cc_regs);
+ dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
+ &req_mem_cc_regs->start, new_drvdata->cc_base);
+
+ /* Then IRQ */
+ new_drvdata->irq = platform_get_irq(plat_dev, 0);
+ if (new_drvdata->irq < 0) {
+ dev_err(dev, "Failed getting IRQ resource\n");
+ return new_drvdata->irq;
+ }
+
+ rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
+ IRQF_SHARED, "ccree", new_drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n",
+ new_drvdata->irq);
+ return rc;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
+
+ init_completion(&new_drvdata->hw_queue_avail);
+
+ if (!plat_dev->dev.dma_mask)
+ plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
+
+ dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
+ while (dma_mask > 0x7fffffffUL) {
+ if (dma_supported(&plat_dev->dev, dma_mask)) {
+ rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
+ if (!rc)
+ break;
+ }
+ dma_mask >>= 1;
+ }
+
+ if (rc) {
+ dev_err(dev, "Failed in dma_set_mask, mask=%pad\n", &dma_mask);
+ return rc;
+ }
+
+ rc = cc_clk_on(new_drvdata);
+ if (rc) {
+ dev_err(dev, "Failed to enable clock");
+ return rc;
+ }
+
+ /* Verify correct mapping */
+ signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
+ if (signature_val != hw_rev->sig) {
+ dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+ signature_val, hw_rev->sig);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
+
+ /* Display HW versions */
+ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
+ hw_rev->name, cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
+ DRV_MODULE_VERSION);
+
+ rc = init_cc_regs(new_drvdata, true);
+ if (rc) {
+ dev_err(dev, "init_cc_regs failed\n");
+ goto post_clk_err;
+ }
+
+ rc = cc_debugfs_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "Failed registering debugfs interface\n");
+ goto post_regs_err;
+ }
+
+ rc = cc_fips_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
+ goto post_debugfs_err;
+ }
+ rc = cc_sram_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_sram_mgr_init failed\n");
+ goto post_fips_init_err;
+ }
+
+ new_drvdata->mlli_sram_addr =
+ cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
+ if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
+ dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
+ rc = -ENOMEM;
+ goto post_sram_mgr_err;
+ }
+
+ rc = cc_req_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_req_mgr_init failed\n");
+ goto post_sram_mgr_err;
+ }
+
+ rc = cc_buffer_mgr_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "buffer_mgr_init failed\n");
+ goto post_req_mgr_err;
+ }
+
+ rc = cc_pm_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "ssi_power_mgr_init failed\n");
+ goto post_buf_mgr_err;
+ }
+
+ rc = cc_ivgen_init(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_ivgen_init failed\n");
+ goto post_power_mgr_err;
+ }
+
+ /* Allocate crypto algs */
+ rc = cc_cipher_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_cipher_alloc failed\n");
+ goto post_ivgen_err;
+ }
+
+ /* hash must be allocated before aead since hash exports APIs */
+ rc = cc_hash_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_hash_alloc failed\n");
+ goto post_cipher_err;
+ }
+
+ rc = cc_aead_alloc(new_drvdata);
+ if (rc) {
+ dev_err(dev, "cc_aead_alloc failed\n");
+ goto post_hash_err;
+ }
+
+ /* If we got here and FIPS mode is enabled
+ * it means all FIPS test passed, so let TEE
+ * know we're good.
+ */
+ cc_set_ree_fips_status(new_drvdata, true);
+
+ return 0;
+
+post_hash_err:
+ cc_hash_free(new_drvdata);
+post_cipher_err:
+ cc_cipher_free(new_drvdata);
+post_ivgen_err:
+ cc_ivgen_fini(new_drvdata);
+post_power_mgr_err:
+ cc_pm_fini(new_drvdata);
+post_buf_mgr_err:
+ cc_buffer_mgr_fini(new_drvdata);
+post_req_mgr_err:
+ cc_req_mgr_fini(new_drvdata);
+post_sram_mgr_err:
+ cc_sram_mgr_fini(new_drvdata);
+post_fips_init_err:
+ cc_fips_fini(new_drvdata);
+post_debugfs_err:
+ cc_debugfs_fini(new_drvdata);
+post_regs_err:
+ fini_cc_regs(new_drvdata);
+post_clk_err:
+ cc_clk_off(new_drvdata);
+ return rc;
+}
+
+void fini_cc_regs(struct cc_drvdata *drvdata)
+{
+ /* Mask all interrupts */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
+}
+
+static void cleanup_cc_resources(struct platform_device *plat_dev)
+{
+ struct cc_drvdata *drvdata =
+ (struct cc_drvdata *)platform_get_drvdata(plat_dev);
+
+ cc_aead_free(drvdata);
+ cc_hash_free(drvdata);
+ cc_cipher_free(drvdata);
+ cc_ivgen_fini(drvdata);
+ cc_pm_fini(drvdata);
+ cc_buffer_mgr_fini(drvdata);
+ cc_req_mgr_fini(drvdata);
+ cc_sram_mgr_fini(drvdata);
+ cc_fips_fini(drvdata);
+ cc_debugfs_fini(drvdata);
+ fini_cc_regs(drvdata);
+ cc_clk_off(drvdata);
+}
+
+int cc_clk_on(struct cc_drvdata *drvdata)
+{
+ struct clk *clk = drvdata->clk;
+ int rc;
+
+ if (IS_ERR(clk))
+ /* Not all devices have a clock associated with CCREE */
+ return 0;
+
+ rc = clk_prepare_enable(clk);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+void cc_clk_off(struct cc_drvdata *drvdata)
+{
+ struct clk *clk = drvdata->clk;
+
+ if (IS_ERR(clk))
+ /* Not all devices have a clock associated with CCREE */
+ return;
+
+ clk_disable_unprepare(clk);
+}
+
+static int ccree_probe(struct platform_device *plat_dev)
+{
+ int rc;
+ struct device *dev = &plat_dev->dev;
+
+ /* Map registers space */
+ rc = init_cc_resources(plat_dev);
+ if (rc)
+ return rc;
+
+ dev_info(dev, "ARM ccree device initialized\n");
+
+ return 0;
+}
+
+static int ccree_remove(struct platform_device *plat_dev)
+{
+ struct device *dev = &plat_dev->dev;
+
+ dev_dbg(dev, "Releasing ccree resources...\n");
+
+ cleanup_cc_resources(plat_dev);
+
+ dev_info(dev, "ARM ccree device terminated\n");
+
+ return 0;
+}
+
+static struct platform_driver ccree_driver = {
+ .driver = {
+ .name = "ccree",
+ .of_match_table = arm_ccree_dev_of_match,
+#ifdef CONFIG_PM
+ .pm = &ccree_pm,
+#endif
+ },
+ .probe = ccree_probe,
+ .remove = ccree_remove,
+};
+
+static int __init ccree_init(void)
+{
+ int ret;
+
+ cc_hash_global_init();
+
+ ret = cc_debugfs_global_init();
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&ccree_driver);
+}
+module_init(ccree_init);
+
+static void __exit ccree_exit(void)
+{
+ platform_driver_unregister(&ccree_driver);
+ cc_debugfs_global_fini();
+}
+module_exit(ccree_exit);
+
+/* Module description */
+MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_AUTHOR("ARM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
new file mode 100644
index 000000000000..2048fdeb9579
--- /dev/null
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_driver.h
+ * ARM CryptoCell Linux Crypto Driver
+ */
+
+#ifndef __CC_DRIVER_H__
+#define __CC_DRIVER_H__
+
+#ifdef COMP_IN_WQ
+#include <linux/workqueue.h>
+#else
+#include <linux/interrupt.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <linux/version.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/* Registers definitions from shared/hw/ree_include */
+#include "cc_host_regs.h"
+#define CC_DEV_SHA_MAX 512
+#include "cc_crypto_ctx.h"
+#include "cc_hw_queue_defs.h"
+#include "cc_sram_mgr.h"
+
+extern bool cc_dump_desc;
+extern bool cc_dump_bytes;
+
+#define DRV_MODULE_VERSION "4.0"
+
+enum cc_hw_rev {
+ CC_HW_REV_630 = 630,
+ CC_HW_REV_710 = 710,
+ CC_HW_REV_712 = 712
+};
+
+#define CC_COHERENT_CACHE_PARAMS 0xEEE
+
+/* Maximum DMA mask supported by IP */
+#define DMA_BIT_MASK_LEN 48
+
+#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
+ (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))
+
+#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
+
+#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+
+#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
+ CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
+ CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+
+/* Register name mangling macro */
+#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
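+/* e.g. CC_REG(HOST_IMR) expands to CC_HOST_IMR_REG_OFFSET */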
+
+/* TEE FIPS status interrupt */
+#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)
+
+#define CC_CRA_PRIO 400
+
+#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
+
+#define MAX_REQUEST_QUEUE_SIZE 4096
+#define MAX_MLLI_BUFF_SIZE 2080
+#define MAX_ICV_NENTS_SUPPORTED 2
+
+/* Definitions for HW descriptors DIN/DOUT fields */
+#define NS_BIT 1
+#define AXI_ID 0
+/* AXI_ID is not actually the AXI ID of the transaction but the value of the
+ * AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
+ */
+
+#define CC_MAX_IVGEN_DMA_ADDRESSES 3
+struct cc_crypto_req {
+ void (*user_cb)(struct device *dev, void *req, int err);
+ void *user_arg;
+ dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
+ /* For the first 'ivgen_dma_addr_len' addresses of this array,
+ * the generated IV is placed there by send_request().
+ * The same generated IV is used for all addresses!
+ */
+ /* Amount of 'ivgen_dma_addr' elements to be filled. */
+ unsigned int ivgen_dma_addr_len;
+ /* The required size of the generated IV; 8 or 16 B are allowed. */
+ unsigned int ivgen_size;
+ struct completion seq_compl; /* request completion */
+};
+
+/**
+ * struct cc_drvdata - driver private data context
+ * @cc_base: virt address of the CC registers
+ * @irq: device IRQ number
+ * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
+ * @fw_ver: SeP loaded firmware version
+ */
+struct cc_drvdata {
+ void __iomem *cc_base;
+ int irq;
+ u32 irq_mask;
+ u32 fw_ver;
+ struct completion hw_queue_avail; /* wait for HW queue availability */
+ struct platform_device *plat_dev;
+ cc_sram_addr_t mlli_sram_addr;
+ void *buff_mgr_handle;
+ void *cipher_handle;
+ void *hash_handle;
+ void *aead_handle;
+ void *request_mgr_handle;
+ void *fips_handle;
+ void *ivgen_handle;
+ void *sram_mgr_handle;
+ void *debugfs;
+ struct clk *clk;
+ bool coherent;
+ char *hw_rev_name;
+ enum cc_hw_rev hw_rev;
+ u32 hash_len_sz;
+ u32 axim_mon_offset;
+};
+
+struct cc_crypto_alg {
+ struct list_head entry;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ unsigned int data_unit;
+ struct cc_drvdata *drvdata;
+ struct skcipher_alg skcipher_alg;
+ struct aead_alg aead_alg;
+};
+
+struct cc_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ u32 type;
+ union {
+ struct skcipher_alg skcipher;
+ struct aead_alg aead;
+ } template_u;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ u32 min_hw_rev;
+ unsigned int data_unit;
+ struct cc_drvdata *drvdata;
+};
+
+struct async_gen_req_ctx {
+ dma_addr_t iv_dma_addr;
+ enum drv_crypto_direction op_type;
+};
+
+static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
+{
+ return &drvdata->plat_dev->dev;
+}
+
+void __dump_byte_array(const char *name, const u8 *buf, size_t len);
+static inline void dump_byte_array(const char *name, const u8 *the_array,
+ size_t size)
+{
+ if (cc_dump_bytes)
+ __dump_byte_array(name, the_array, size);
+}
+
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
+void fini_cc_regs(struct cc_drvdata *drvdata);
+int cc_clk_on(struct cc_drvdata *drvdata);
+void cc_clk_off(struct cc_drvdata *drvdata);
+
+static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+
+static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
+static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
+{
+ return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+}
+
+static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
+ struct cc_hw_desc *pdesc)
+{
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ set_queue_last_ind_bit(pdesc);
+}
+
+#endif /*__CC_DRIVER_H__*/
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
new file mode 100644
index 000000000000..b4d0a6d983e0
--- /dev/null
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/fips.h>
+
+#include "cc_driver.h"
+#include "cc_fips.h"
+
+static void fips_dsr(unsigned long devarg);
+
+struct cc_fips_handle {
+ struct tasklet_struct tasklet;
+};
+
+/* Called once at driver entry to check whether a TEE FIPS error
+ * has occurred.
+ */
+static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
+{
+ u32 reg;
+
+ reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
+ return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+}
+
+/*
+ * Push the REE FIPS library status towards the TEE library by writing
+ * the state to the HOST_GPR0 register.
+ */
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
+{
+ int val = CC_FIPS_SYNC_REE_STATUS;
+
+ if (drvdata->hw_rev < CC_HW_REV_712)
+ return;
+
+ val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
+
+ cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
+}
+
+void cc_fips_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_fips_handle *fips_h = drvdata->fips_handle;
+
+ if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h)
+ return;
+
+ /* Kill tasklet */
+ tasklet_kill(&fips_h->tasklet);
+
+ kfree(fips_h);
+ drvdata->fips_handle = NULL;
+}
+
+void fips_handler(struct cc_drvdata *drvdata)
+{
+ struct cc_fips_handle *fips_handle_ptr = drvdata->fips_handle;
+
+ if (drvdata->hw_rev < CC_HW_REV_712)
+ return;
+
+ tasklet_schedule(&fips_handle_ptr->tasklet);
+}
+
+static inline void tee_fips_error(struct device *dev)
+{
+ if (fips_enabled)
+ panic("ccree: TEE reported cryptographic error in fips mode!\n");
+ else
+ dev_err(dev, "TEE reported error!\n");
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void fips_dsr(unsigned long devarg)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
+ struct device *dev = drvdata_to_dev(drvdata);
+ u32 irq, state, val;
+
+ irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
+
+ if (irq) {
+ state = cc_ioread(drvdata, CC_REG(GPR_HOST));
+
+ if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
+ tee_fips_error(dev);
+ }
+
+ /* After verifying that there is nothing left to do,
+ * unmask the interrupt again.
+ */
+ val = cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq;
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
+}
+
+/* Called once at driver entry point. */
+int cc_fips_init(struct cc_drvdata *p_drvdata)
+{
+ struct cc_fips_handle *fips_h;
+ struct device *dev = drvdata_to_dev(p_drvdata);
+
+ if (p_drvdata->hw_rev < CC_HW_REV_712)
+ return 0;
+
+ fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+ if (!fips_h)
+ return -ENOMEM;
+
+ p_drvdata->fips_handle = fips_h;
+
+ dev_dbg(dev, "Initializing fips tasklet\n");
+ tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
+
+ if (!cc_get_tee_fips_status(p_drvdata))
+ tee_fips_error(dev);
+
+ return 0;
+}
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
new file mode 100644
index 000000000000..645e096a7a82
--- /dev/null
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_FIPS_H__
+#define __CC_FIPS_H__
+
+#ifdef CONFIG_CRYPTO_FIPS
+
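+/*
+ * GPR-based TEE<->REE FIPS handshake values: the REE reports its status by
+ * writing CC_FIPS_SYNC_REE_STATUS plus a module-status bit to HOST_GPR0
+ * (see cc_set_ree_fips_status()), and reads CC_FIPS_SYNC_TEE_STATUS plus the
+ * TEE's status bit back from GPR_HOST (see cc_get_tee_fips_status()).
+ */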
+enum cc_fips_status {
+ CC_FIPS_SYNC_MODULE_OK = 0x0,
+ CC_FIPS_SYNC_MODULE_ERROR = 0x1,
+ CC_FIPS_SYNC_REE_STATUS = 0x4,
+ CC_FIPS_SYNC_TEE_STATUS = 0x8,
+ CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
+};
+
+int cc_fips_init(struct cc_drvdata *p_drvdata);
+void cc_fips_fini(struct cc_drvdata *drvdata);
+void fips_handler(struct cc_drvdata *drvdata);
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+
+#else /* CONFIG_CRYPTO_FIPS */
+
+static inline int cc_fips_init(struct cc_drvdata *p_drvdata)
+{
+ return 0;
+}
+
+static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
+ bool ok) {}
+static inline void fips_handler(struct cc_drvdata *drvdata) {}
+
+#endif /* CONFIG_CRYPTO_FIPS */
+
+#endif /*__CC_FIPS_H__*/
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
new file mode 100644
index 000000000000..96ff777474d7
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -0,0 +1,2296 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/internal/hash.h>
+
+#include "cc_driver.h"
+#include "cc_request_mgr.h"
+#include "cc_buffer_mgr.h"
+#include "cc_hash.h"
+#include "cc_sram_mgr.h"
+
+#define CC_MAX_HASH_SEQ_LEN 12
+#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
+
+struct cc_hash_handle {
+ cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
+ cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+ struct list_head hash_list;
+};
+
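+/*
+ * Initial hash byte-count values used in the HMAC flow (see cc_init_req()):
+ * 0x40 (64 B) and 0x80 (128 B) equal one input block, i.e. the key-XOR-ipad
+ * (or opad) block already hashed by setkey time.
+ */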
+static const u32 digest_len_init[] = {
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
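+/*
+ * MD5's initial state words A..D are numerically identical to SHA-1's
+ * H0..H3, so the SHA1_H* constants are reused here (listed in reverse word
+ * order, as with the other larval digests below).
+ */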
+static const u32 md5_init[] = {
+ SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const u32 sha1_init[] = {
+ SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const u32 sha224_init[] = {
+ SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
+static const u32 sha256_init[] = {
+ SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
+static const u32 digest_len_sha512_init[] = {
+ 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
+static u64 sha384_init[] = {
+ SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
+ SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
+static u64 sha512_init[] = {
+ SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size);
+
+static const void *cc_larval_digest(struct device *dev, u32 mode);
+
+struct cc_hash_alg {
+ struct list_head entry;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct cc_drvdata *drvdata;
+ struct ahash_alg ahash_alg;
+};
+
+struct hash_key_req_ctx {
+ u32 keylen;
+ dma_addr_t key_dma_addr;
+};
+
+/* hash per-session context */
+struct cc_hash_ctx {
+ struct cc_drvdata *drvdata;
+ /* holds the original digest: the digest after "setkey" if HMAC,
+ * the initial digest if HASH.
+ */
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
+
+ dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
+ dma_addr_t digest_buff_dma_addr;
+ /* used for HMAC with a key larger than the mode's block size */
+ struct hash_key_req_ctx key_params;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct completion setkey_comp;
+ bool is_hmac;
+};
+
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
+ unsigned int flow_mode, struct cc_hw_desc desc[],
+ bool is_not_last_data, unsigned int *seq_size);
+
+static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
+{
+ if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
+ mode == DRV_HASH_SHA512) {
+ set_bytes_swap(desc, 1);
+ } else {
+ set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ }
+}
+
+static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize)
+{
+ state->digest_result_dma_addr =
+ dma_map_single(dev, state->digest_result_buff,
+ digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
+ dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
+ digestsize);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
+ digestsize, state->digest_result_buff,
+ &state->digest_result_dma_addr);
+
+ return 0;
+}
+
+static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+
+ memset(state, 0, sizeof(*state));
+
+ if (is_hmac) {
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
+ ctx->hw_mode != DRV_CIPHER_CMAC) {
+ dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+
+ memcpy(state->digest_buff, ctx->digest_buff,
+ ctx->inter_digestsize);
+ if (ctx->hash_mode == DRV_HASH_SHA512 ||
+ ctx->hash_mode == DRV_HASH_SHA384)
+ memcpy(state->digest_bytes_len,
+ digest_len_sha512_init,
+ ctx->drvdata->hash_len_sz);
+ else
+ memcpy(state->digest_bytes_len, digest_len_init,
+ ctx->drvdata->hash_len_sz);
+ }
+
+ if (ctx->hash_mode != DRV_HASH_NULL) {
+ dma_sync_single_for_cpu(dev,
+ ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+ memcpy(state->opad_digest_buff,
+ ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
+ }
+ } else { /*hash*/
+ /* Copy the initial digests if hash flow. */
+ const void *larval = cc_larval_digest(dev, ctx->hash_mode);
+
+ memcpy(state->digest_buff, larval, ctx->inter_digestsize);
+ }
+}
+
+static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+
+ state->digest_buff_dma_addr =
+ dma_map_single(dev, state->digest_buff,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
+ dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize, state->digest_buff);
+ return -EINVAL;
+ }
+ dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->digest_buff,
+ &state->digest_buff_dma_addr);
+
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
+ state->digest_bytes_len_dma_addr =
+ dma_map_single(dev, state->digest_bytes_len,
+ HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
+ dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+ HASH_MAX_LEN_SIZE, state->digest_bytes_len);
+ goto unmap_digest_buf;
+ }
+ dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+ HASH_MAX_LEN_SIZE, state->digest_bytes_len,
+ &state->digest_bytes_len_dma_addr);
+ }
+
+ if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
+ state->opad_digest_dma_addr =
+ dma_map_single(dev, state->opad_digest_buff,
+ ctx->inter_digestsize,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
+ dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize,
+ state->opad_digest_buff);
+ goto unmap_digest_len;
+ }
+ dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->opad_digest_buff,
+ &state->opad_digest_dma_addr);
+ }
+
+ return 0;
+
+unmap_digest_len:
+ if (state->digest_bytes_len_dma_addr) {
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+ state->digest_bytes_len_dma_addr = 0;
+ }
+unmap_digest_buf:
+ if (state->digest_buff_dma_addr) {
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ state->digest_buff_dma_addr = 0;
+ }
+
+ return -EINVAL;
+}
+
+static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
+ struct cc_hash_ctx *ctx)
+{
+ if (state->digest_buff_dma_addr) {
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &state->digest_buff_dma_addr);
+ state->digest_buff_dma_addr = 0;
+ }
+ if (state->digest_bytes_len_dma_addr) {
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
+ &state->digest_bytes_len_dma_addr);
+ state->digest_bytes_len_dma_addr = 0;
+ }
+ if (state->opad_digest_dma_addr) {
+ dma_unmap_single(dev, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
+ &state->opad_digest_dma_addr);
+ state->opad_digest_dma_addr = 0;
+ }
+}
+
+static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
+ unsigned int digestsize, u8 *result)
+{
+ if (state->digest_result_dma_addr) {
+ dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
+ DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
+ state->digest_result_buff,
+ &state->digest_result_dma_addr, digestsize);
+ memcpy(result, state->digest_result_buff, digestsize);
+ }
+ state->digest_result_dma_addr = 0;
+}
+
+static void cc_update_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static void cc_digest_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static void cc_hash_complete(struct device *dev, void *cc_req, int err)
+{
+ struct ahash_request *req = (struct ahash_request *)cc_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ dev_dbg(dev, "req=%pK\n", req);
+
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ req->base.complete(&req->base, err);
+}
+
+static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ return idx;
+}
+
+static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
+ int idx)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+
+ /* store the hash digest result in the context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
+ NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx],
+ cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
+ ctx->drvdata->hash_len_sz);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ return idx;
+}
+
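+/*
+ * One-shot digest: load the initial digest (or the ipad-derived state for
+ * HMAC), load the running length, hash the data, optionally run the outer
+ * HMAC hash via cc_fin_hmac(), then write the result out via cc_fin_result().
+ */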
+static int cc_hash_digest(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ cc_sram_addr_t larval_digest_addr =
+ cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+ int idx = 0;
+ int rc = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_digest_complete;
+ cc_req.user_arg = req;
+
+ /* If HMAC, load the hash IPAD XOR key; if plain HASH, load the
+ * initial digest.
+ */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ if (is_hmac) {
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ } else {
+ set_din_sram(&desc[idx], larval_digest_addr,
+ ctx->inter_digestsize);
+ }
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+
+ if (is_hmac) {
+ set_din_type(&desc[idx], DMA_DLLI,
+ state->digest_bytes_len_dma_addr,
+ ctx->drvdata->hash_len_sz, NS_BIT);
+ } else {
+ set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ if (nbytes)
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ else
+ set_cipher_do(&desc[idx], DO_PAD);
+ }
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ if (is_hmac) {
+ /* HW last hash block padding (aka. "DO_PAD") */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->drvdata->hash_len_sz, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_cipher_do(&desc[idx], DO_PAD);
+ idx++;
+
+ idx = cc_fin_hmac(desc, req, idx);
+ }
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
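+/* Reload the saved intermediate digest and running length from the request
+ * context, then queue the data for hashing.
+ */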
+static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
+ struct ahash_req_ctx *state, unsigned int idx)
+{
+ /* Restore hash digest */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Restore hash current length */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
+ ctx->drvdata->hash_len_sz, NS_BIT);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ return idx;
+}
+
+static int cc_hash_update(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ u32 idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
+ "hmac" : "hash", nbytes);
+
+ if (nbytes == 0) {
+ /* no real updates required */
+ return 0;
+ }
+
+ rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
+ block_size, flags);
+ if (rc) {
+ if (rc == 1) {
+ dev_dbg(dev, " data size not require HW update %x\n",
+ nbytes);
+ /* No hardware updates are required */
+ return 0;
+ }
+ dev_err(dev, "map_ahash_request_update() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ return -EINVAL;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_update_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ /* store the hash digest result in context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* store current hash length in context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+ ctx->drvdata->hash_len_sz, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_finup(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_final(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct scatterlist *src = req->src;
+ unsigned int nbytes = req->nbytes;
+ u8 *result = req->result;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ bool is_hmac = ctx->is_hmac;
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
+ flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
+
+ idx = cc_restore_hash(desc, ctx, state, idx);
+
+ /* "DO-PAD" must be enabled only when writing current length to HW */
+ hw_desc_init(&desc[idx]);
+ set_cipher_do(&desc[idx], DO_PAD);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+ ctx->drvdata->hash_len_sz, NS_BIT, 0);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ idx++;
+
+ if (is_hmac)
+ idx = cc_fin_hmac(desc, req, idx);
+
+ idx = cc_fin_result(desc, req, idx);
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, src, true);
+ cc_unmap_result(dev, state, digestsize, result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_init(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ return 0;
+}
+
+static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = NULL;
+ int blocksize = 0;
+ int digestsize = 0;
+ int i, idx = 0, rc = 0;
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ cc_sram_addr_t larval_addr;
+ struct device *dev;
+
+ ctx = crypto_ahash_ctx(ahash);
+ dev = drvdata_to_dev(ctx->drvdata);
+ dev_dbg(dev, "start keylen: %d", keylen);
+
+ blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ digestsize = crypto_ahash_digestsize(ahash);
+
+ larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+
+ /* A keylen of ZERO bytes selects the plain HASH flow;
+ * any NON-ZERO value selects the HMAC flow.
+ */
+ ctx->key_params.keylen = keylen;
+ ctx->key_params.key_dma_addr = 0;
+ ctx->is_hmac = true;
+
+ if (keylen) {
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ if (keylen > blocksize) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx], larval_addr,
+ ctx->inter_digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->key_params.key_dma_addr, keylen,
+ NS_BIT);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get hashed key */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+ cc_set_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, (blocksize - digestsize));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr +
+ digestsize),
+ (blocksize - digestsize), NS_BIT, 0);
+ idx++;
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ ctx->key_params.key_dma_addr, keylen,
+ NS_BIT);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ keylen, NS_BIT, 0);
+ idx++;
+
+ if ((blocksize - keylen)) {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0,
+ (blocksize - keylen));
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr +
+ keylen), (blocksize - keylen),
+ NS_BIT, 0);
+ idx++;
+ }
+ }
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0, blocksize);
+ set_flow_mode(&desc[idx], BYPASS);
+ set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
+ blocksize, NS_BIT, 0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+ if (rc) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ goto out;
+ }
+
+ /* Calculate the derived HMAC keys: iteration 0 hashes (key XOR ipad)
+ * into digest_buff, iteration 1 hashes (key XOR opad) into
+ * opad_tmp_keys_buff.
+ */
+ for (idx = 0, i = 0; i < 2; i++) {
+ /* Load hash initial state */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length*/
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Prepare ipad key */
+ hw_desc_init(&desc[idx]);
+ set_xor_val(&desc[idx], hmac_pad_const[i]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_flow_mode(&desc[idx], S_DIN_to_HASH);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ idx++;
+
+ /* Perform HASH update */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+ blocksize, NS_BIT);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_xor_active(&desc[idx]);
+ set_flow_mode(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
+ * of the first HASH "update" state)
+ */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ if (i > 0) /* Not first iteration */
+ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ else /* First iteration */
+ set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 0);
+ set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+ }
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+
+out:
+ if (rc)
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ if (ctx->key_params.key_dma_addr) {
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+ }
+ return rc;
+}
+
+static int cc_xcbc_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct cc_crypto_req cc_req = {};
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ int rc = 0;
+ unsigned int idx = 0;
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->key_params.keylen = keylen;
+
+ ctx->key_params.key_dma_addr =
+ dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
+ return -ENOMEM;
+ }
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ ctx->is_hmac = true;
+ /* 1. Load the AES key */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
+ keylen, NS_BIT);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ set_key_size_aes(&desc[idx], keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ set_dout_dlli(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
+
+ if (rc)
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+
+ return rc;
+}
+
+static int cc_cmac_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
+
+ ctx->is_hmac = true;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->key_params.keylen = keylen;
+
+ /* STAT_PHASE_1: Copy key to ctx */
+
+ dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
+ keylen, DMA_TO_DEVICE);
+
+ memcpy(ctx->opad_tmp_keys_buff, key, keylen);
+ if (keylen == 24) {
+ memset(ctx->opad_tmp_keys_buff + 24, 0,
+ CC_AES_KEY_SIZE_MAX - 24);
+ }
+
+ dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
+ keylen, DMA_TO_DEVICE);
+
+ ctx->key_params.keylen = keylen;
+
+ return 0;
+}
+
+static void cc_free_ctx(struct cc_hash_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (ctx->digest_buff_dma_addr) {
+ dma_unmap_single(dev, ctx->digest_buff_dma_addr,
+ sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &ctx->digest_buff_dma_addr);
+ ctx->digest_buff_dma_addr = 0;
+ }
+ if (ctx->opad_tmp_keys_dma_addr) {
+ dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
+ dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
+ &ctx->opad_tmp_keys_dma_addr);
+ ctx->opad_tmp_keys_dma_addr = 0;
+ }
+
+ ctx->key_params.keylen = 0;
+}
+
+static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
+{
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ ctx->key_params.keylen = 0;
+
+ ctx->digest_buff_dma_addr =
+ dma_map_single(dev, (void *)ctx->digest_buff,
+ sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
+ dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff);
+ goto fail;
+ }
+ dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff,
+ &ctx->digest_buff_dma_addr);
+
+ ctx->opad_tmp_keys_dma_addr =
+ dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
+ dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->opad_tmp_keys_buff),
+ ctx->opad_tmp_keys_buff);
+ goto fail;
+ }
+ dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+ &ctx->opad_tmp_keys_dma_addr);
+
+ ctx->is_hmac = false;
+ return 0;
+
+fail:
+ cc_free_ctx(ctx);
+ return -ENOMEM;
+}
+
+static int cc_cra_init(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct hash_alg_common *hash_alg_common =
+ container_of(tfm->__crt_alg, struct hash_alg_common, base);
+ struct ahash_alg *ahash_alg =
+ container_of(hash_alg_common, struct ahash_alg, halg);
+ struct cc_hash_alg *cc_alg =
+ container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_req_ctx));
+
+ ctx->hash_mode = cc_alg->hash_mode;
+ ctx->hw_mode = cc_alg->hw_mode;
+ ctx->inter_digestsize = cc_alg->inter_digestsize;
+ ctx->drvdata = cc_alg->drvdata;
+
+ return cc_alloc_ctx(ctx);
+}
+
+static void cc_cra_exit(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "cc_cra_exit");
+ cc_free_ctx(ctx);
+}
+
+static int cc_mac_update(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int rc;
+ u32 idx = 0;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ if (req->nbytes == 0) {
+ /* no real updates required */
+ return 0;
+ }
+
+ state->xcbc_count++;
+
+ rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
+ req->nbytes, block_size, flags);
+ if (rc) {
+ if (rc == 1) {
+ dev_dbg(dev, " data size not require HW update %x\n",
+ req->nbytes);
+ /* No hardware updates are required */
+ return 0;
+ }
+ dev_err(dev, "map_ahash_request_update() failed\n");
+ return -ENOMEM;
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ return -EINVAL;
+ }
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
+ cc_setup_xcbc(req, desc, &idx);
+ else
+ cc_setup_cmac(req, desc, &idx);
+
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
+
+ /* store the hash digest result in context */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ ctx->inter_digestsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_update_complete;
+ cc_req.user_arg = (void *)req;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_final(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int idx = 0;
+ int rc = 0;
+ u32 key_size, key_len;
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
+ u32 rem_cnt = *cc_hash_buf_cnt(state);
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_size = CC_AES_128_BIT_KEY_SIZE;
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ } else {
+ key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+ ctx->key_params.keylen;
+ key_len = ctx->key_params.keylen;
+ }
+
+ dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 0, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
+
+ if (state->xcbc_count && rem_cnt == 0) {
+ /* Load key for ECB decryption */
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+ set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
+ key_size, NS_BIT);
+ set_key_size_aes(&desc[idx], key_len);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Initiate decryption of block state to previous
+ * block_state-XOR-M[n]
+ */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT, 0);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Memory Barrier: wait for axi write to complete */
+ hw_desc_init(&desc[idx]);
+ set_din_no_dma(&desc[idx], 0, 0xfffff0);
+ set_dout_no_dma(&desc[idx], 0, 0, 1);
+ idx++;
+ }
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
+ cc_setup_xcbc(req, desc, &idx);
+ else
+ cc_setup_cmac(req, desc, &idx);
+
+ if (state->xcbc_count == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else if (rem_cnt > 0) {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ } else {
+ hw_desc_init(&desc[idx]);
+ set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
+ set_flow_mode(&desc[idx], DIN_AES_DOUT);
+ idx++;
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ digestsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_finup(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ int idx = 0;
+ int rc = 0;
+ u32 key_len = 0;
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
+ if (state->xcbc_count > 0 && req->nbytes == 0) {
+ dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
+ return cc_mac_final(req);
+ }
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -EINVAL;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ cc_setup_xcbc(req, desc, &idx);
+ } else {
+ key_len = ctx->key_params.keylen;
+ cc_setup_cmac(req, desc, &idx);
+ }
+
+ if (req->nbytes == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ /* TODO */
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ digestsize, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_mac_digest(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ u32 digestsize = crypto_ahash_digestsize(tfm);
+ struct cc_crypto_req cc_req = {};
+ struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
+ u32 key_len;
+ unsigned int idx = 0;
+ int rc;
+ gfp_t flags = cc_gfp_flags(&req->base);
+
+ dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
+
+ cc_init_req(dev, state, ctx);
+
+ if (cc_map_req(dev, state, ctx)) {
+ dev_err(dev, "map_ahash_source() failed\n");
+ return -ENOMEM;
+ }
+ if (cc_map_result(dev, state, digestsize)) {
+ dev_err(dev, "map_ahash_digest() failed\n");
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1, flags)) {
+ dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ return -ENOMEM;
+ }
+
+ /* Setup request structure */
+ cc_req.user_cb = (void *)cc_digest_complete;
+ cc_req.user_arg = (void *)req;
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ cc_setup_xcbc(req, desc, &idx);
+ } else {
+ key_len = ctx->key_params.keylen;
+ cc_setup_cmac(req, desc, &idx);
+ }
+
+ if (req->nbytes == 0) {
+ hw_desc_init(&desc[idx]);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_key_size_aes(&desc[idx], key_len);
+ set_cmac_size0_mode(&desc[idx]);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else {
+ cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ }
+
+ /* Get final MAC result */
+ hw_desc_init(&desc[idx]);
+ set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT, 1);
+ set_queue_last_ind(ctx->drvdata, &desc[idx]);
+ set_flow_mode(&desc[idx], S_AES_to_DOUT);
+ set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_cipher_mode(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
+ cc_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int cc_hash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ u8 *curr_buff = cc_hash_buf(state);
+ u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
+ const u32 tmp = CC_EXPORT_MAGIC;
+
+ memcpy(out, &tmp, sizeof(u32));
+ out += sizeof(u32);
+
+ memcpy(out, state->digest_buff, ctx->inter_digestsize);
+ out += ctx->inter_digestsize;
+
+ memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
+ out += ctx->drvdata->hash_len_sz;
+
+ memcpy(out, &curr_buff_cnt, sizeof(u32));
+ out += sizeof(u32);
+
+ memcpy(out, curr_buff, curr_buff_cnt);
+
+ return 0;
+}
+
+static int cc_hash_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ u32 tmp;
+
+ memcpy(&tmp, in, sizeof(u32));
+ if (tmp != CC_EXPORT_MAGIC)
+ return -EINVAL;
+ in += sizeof(u32);
+
+ cc_init_req(dev, state, ctx);
+
+ memcpy(state->digest_buff, in, ctx->inter_digestsize);
+ in += ctx->inter_digestsize;
+
+ memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
+ in += ctx->drvdata->hash_len_sz;
+
+ /* Sanity check the data as much as possible */
+ memcpy(&tmp, in, sizeof(u32));
+ if (tmp > CC_MAX_HASH_BLCK_SIZE)
+ return -EINVAL;
+ in += sizeof(u32);
+
+ state->buf_cnt[0] = tmp;
+ memcpy(state->buffers[0], in, tmp);
+
+ return 0;
+}
+
+struct cc_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char mac_name[CRYPTO_MAX_ALG_NAME];
+ char mac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ bool synchronize;
+ struct ahash_alg template_ahash;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct cc_drvdata *drvdata;
+ u32 min_hw_rev;
+};
+
+#define CC_STATE_SIZE(_x) \
+ ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
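+/*
+ * Worked example (for illustration): with HASH_MAX_LEN_SIZE = 16 and
+ * CC_MAX_HASH_BLCK_SIZE = 128, the SHA-256 entry below advertises
+ * CC_STATE_SIZE(SHA256_DIGEST_SIZE) = 32 + 16 + 128 + 2 * 4 = 184 bytes:
+ * magic word + intermediate digest + length counter + count word +
+ * pending partial block, as serialized by cc_hash_export() above.
+ */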
+
+/* hash descriptors */
+static struct cc_hash_template driver_hash[] = {
+	//Asynchronous hash templates
+ {
+ .name = "sha1",
+ .driver_name = "sha1-ccree",
+ .mac_name = "hmac(sha1)",
+ .mac_driver_name = "hmac-sha1-ccree",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .synchronize = false,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA1,
+ .hw_mode = DRV_HASH_HW_SHA1,
+ .inter_digestsize = SHA1_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "sha256",
+ .driver_name = "sha256-ccree",
+ .mac_name = "hmac(sha256)",
+ .mac_driver_name = "hmac-sha256-ccree",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
+ },
+ },
+ .hash_mode = DRV_HASH_SHA256,
+ .hw_mode = DRV_HASH_HW_SHA256,
+ .inter_digestsize = SHA256_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "sha224",
+ .driver_name = "sha224-ccree",
+ .mac_name = "hmac(sha224)",
+ .mac_driver_name = "hmac-sha224-ccree",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA224,
+ .hw_mode = DRV_HASH_HW_SHA256,
+ .inter_digestsize = SHA256_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .name = "sha384",
+ .driver_name = "sha384-ccree",
+ .mac_name = "hmac(sha384)",
+ .mac_driver_name = "hmac-sha384-ccree",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA384,
+ .hw_mode = DRV_HASH_HW_SHA512,
+ .inter_digestsize = SHA512_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "sha512",
+ .driver_name = "sha512-ccree",
+ .mac_name = "hmac(sha512)",
+ .mac_driver_name = "hmac-sha512-ccree",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SHA512,
+ .hw_mode = DRV_HASH_HW_SHA512,
+ .inter_digestsize = SHA512_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_712,
+ },
+ {
+ .name = "md5",
+ .driver_name = "md5-ccree",
+ .mac_name = "hmac(md5)",
+ .mac_driver_name = "hmac-md5-ccree",
+ .blocksize = MD5_HMAC_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_MD5,
+ .hw_mode = DRV_HASH_HW_MD5,
+ .inter_digestsize = MD5_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .mac_name = "xcbc(aes)",
+ .mac_driver_name = "xcbc-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_xcbc_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_NULL,
+ .hw_mode = DRV_CIPHER_XCBC_MAC,
+ .inter_digestsize = AES_BLOCK_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+ {
+ .mac_name = "cmac(aes)",
+ .mac_driver_name = "cmac-aes-ccree",
+ .blocksize = AES_BLOCK_SIZE,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_mac_update,
+ .final = cc_mac_final,
+ .finup = cc_mac_finup,
+ .digest = cc_mac_digest,
+ .setkey = cc_cmac_setkey,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_NULL,
+ .hw_mode = DRV_CIPHER_CMAC,
+ .inter_digestsize = AES_BLOCK_SIZE,
+ .min_hw_rev = CC_HW_REV_630,
+ },
+};
+
+static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
+ struct device *dev, bool keyed)
+{
+ struct cc_hash_alg *t_crypto_alg;
+ struct crypto_alg *alg;
+ struct ahash_alg *halg;
+
+ t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
+ if (!t_crypto_alg)
+ return ERR_PTR(-ENOMEM);
+
+ t_crypto_alg->ahash_alg = template->template_ahash;
+ halg = &t_crypto_alg->ahash_alg;
+ alg = &halg->halg.base;
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->mac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->mac_driver_name);
+ } else {
+ halg->setkey = NULL;
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
+ alg->cra_priority = CC_CRA_PRIO;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_exit = cc_cra_exit;
+
+ alg->cra_init = cc_cra_init;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->cra_type = &crypto_ahash_type;
+
+ t_crypto_alg->hash_mode = template->hash_mode;
+ t_crypto_alg->hw_mode = template->hw_mode;
+ t_crypto_alg->inter_digestsize = template->inter_digestsize;
+
+ return t_crypto_alg;
+}
+
+int cc_init_hash_sram(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+ cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
+ unsigned int larval_seq_len = 0;
+ struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+ bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
+ int rc = 0;
+
+ /* Copy-to-sram digest-len */
+ cc_set_sram_desc(digest_len_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+
+ sram_buff_ofs += sizeof(digest_len_init);
+ larval_seq_len = 0;
+
+ if (large_sha_supported) {
+ /* Copy-to-sram digest-len for sha384/512 */
+ cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_sha512_init),
+ larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+
+ sram_buff_ofs += sizeof(digest_len_sha512_init);
+ larval_seq_len = 0;
+ }
+
+ /* The initial digests offset */
+ hash_handle->larval_digest_sram_addr = sram_buff_ofs;
+
+ /* Copy-to-sram initial SHA* digests */
+ cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
+ larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(md5_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc(sha1_init, sram_buff_ofs,
+ ARRAY_SIZE(sha1_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha1_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc(sha224_init, sram_buff_ofs,
+ ARRAY_SIZE(sha224_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha224_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc(sha256_init, sram_buff_ofs,
+ ARRAY_SIZE(sha256_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha256_init);
+ larval_seq_len = 0;
+
+ if (large_sha_supported) {
+ cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
+ (ARRAY_SIZE(sha384_init) * 2), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha384_init);
+ larval_seq_len = 0;
+
+ cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
+ (ARRAY_SIZE(sha512_init) * 2), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ }
+
+init_digest_const_err:
+ return rc;
+}
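+/*
+ * Illustrative layout (not part of this patch): after cc_init_hash_sram()
+ * the reserved SRAM region is filled contiguously as
+ *
+ *   digest_len_sram_addr:    digest_len_init
+ *                            [digest_len_sha512_init]         (HW rev >= 712)
+ *   larval_digest_sram_addr: md5_init, sha1_init, sha224_init, sha256_init
+ *                            [sha384_init, sha512_init]       (HW rev >= 712)
+ *
+ * cc_larval_digest_addr() and cc_digest_len_addr() below recover individual
+ * entries by summing sizeof() of the preceding arrays, so the two layouts
+ * must stay in sync.
+ */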
+
+static void __init cc_swap_dwords(u32 *buf, unsigned long size)
+{
+ int i;
+ u32 tmp;
+
+ for (i = 0; i < size; i += 2) {
+ tmp = buf[i];
+ buf[i] = buf[i + 1];
+ buf[i + 1] = tmp;
+ }
+}
+
+/*
+ * Due to the way the HW works we need to swap every
+ * double word in the SHA384 and SHA512 larval hashes
+ */
+void __init cc_hash_global_init(void)
+{
+ cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
+ cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
+}
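+/*
+ * Worked example (for illustration), assuming the SHA-384/512 larval arrays
+ * hold 64-bit constants as the (u32 *) casts and ARRAY_SIZE() * 2 factors
+ * above suggest: on a little-endian host a value such as
+ * 0x1122334455667788ULL sits in memory as the u32 pair
+ * { 0x55667788, 0x11223344 }; cc_swap_dwords() turns it into
+ * { 0x11223344, 0x55667788 }, i.e. most-significant word first, which is
+ * the order the hash engine expects.
+ */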
+
+int cc_hash_alloc(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_handle *hash_handle;
+ cc_sram_addr_t sram_buff;
+ u32 sram_size_to_alloc;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+ int alg;
+
+ hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
+ if (!hash_handle)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&hash_handle->hash_list);
+ drvdata->hash_handle = hash_handle;
+
+ sram_size_to_alloc = sizeof(digest_len_init) +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init);
+
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ sram_size_to_alloc += sizeof(digest_len_sha512_init) +
+ sizeof(sha384_init) + sizeof(sha512_init);
+
+ sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
+ if (sram_buff == NULL_SRAM_ADDR) {
+ dev_err(dev, "SRAM pool exhausted\n");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ /* The initial digest-len offset */
+ hash_handle->digest_len_sram_addr = sram_buff;
+
+	/* Must be set before the alg registration as it is used there */
+ rc = cc_init_hash_sram(drvdata);
+ if (rc) {
+ dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
+ goto fail;
+ }
+
+ /* ahash registration */
+ for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
+ struct cc_hash_alg *t_alg;
+ int hw_mode = driver_hash[alg].hw_mode;
+
+ /* We either support both HASH and MAC or none */
+ if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
+ continue;
+
+ /* register hmac version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else {
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ }
+
+ if (hw_mode == DRV_CIPHER_XCBC_MAC ||
+ hw_mode == DRV_CIPHER_CMAC)
+ continue;
+
+ /* register hash version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else {
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ }
+ }
+
+ return 0;
+
+fail:
+ kfree(drvdata->hash_handle);
+ drvdata->hash_handle = NULL;
+ return rc;
+}
+
+int cc_hash_free(struct cc_drvdata *drvdata)
+{
+ struct cc_hash_alg *t_hash_alg, *hash_n;
+ struct cc_hash_handle *hash_handle = drvdata->hash_handle;
+
+ if (hash_handle) {
+ list_for_each_entry_safe(t_hash_alg, hash_n,
+ &hash_handle->hash_list, entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
+ kfree(t_hash_alg);
+ }
+
+ kfree(hash_handle);
+ drvdata->hash_handle = NULL;
+ }
+ return 0;
+}
+
+static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Setup XCBC MAC K1 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
+ XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Setup XCBC MAC K2 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Setup XCBC MAC K3 */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Loading MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ *seq_size = idx;
+}
+
+static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Setup CMAC Key */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+ ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+ ctx->key_params.keylen), NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Load MAC state */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+ CC_AES_BLOCK_SIZE, NS_BIT);
+ set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+ set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+ set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+ set_flow_mode(&desc[idx], S_DIN_to_AES);
+ idx++;
+ *seq_size = idx;
+}
+
+static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
+ struct cc_hash_ctx *ctx, unsigned int flow_mode,
+ struct cc_hw_desc desc[], bool is_not_last_data,
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ sg_dma_address(areq_ctx->curr_sg),
+ areq_ctx->curr_sg->length, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ idx++;
+ } else {
+ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+ dev_dbg(dev, " NULL mode\n");
+ /* nothing to build */
+ return;
+ }
+ /* bypass */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_DLLI,
+ areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_len, NS_BIT);
+ set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
+ areq_ctx->mlli_params.mlli_len);
+ set_flow_mode(&desc[idx], BYPASS);
+ idx++;
+ /* process */
+ hw_desc_init(&desc[idx]);
+ set_din_type(&desc[idx], DMA_MLLI,
+ ctx->drvdata->mlli_sram_addr,
+ areq_ctx->mlli_nents, NS_BIT);
+ set_flow_mode(&desc[idx], flow_mode);
+ idx++;
+ }
+ if (is_not_last_data)
+ set_din_not_last_indication(&desc[(idx - 1)]);
+ /* return updated desc sequence size */
+ *seq_size = idx;
+}
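+/*
+ * Illustrative note (not part of this patch): for DLLI-mapped data the
+ * sequence built above is a single data descriptor; for MLLI it is two,
+ * one BYPASS descriptor that copies the MLLI table into SRAM and one data
+ * descriptor that consumes it from there.
+ */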
+
+static const void *cc_larval_digest(struct device *dev, u32 mode)
+{
+ switch (mode) {
+ case DRV_HASH_MD5:
+ return md5_init;
+ case DRV_HASH_SHA1:
+ return sha1_init;
+ case DRV_HASH_SHA224:
+ return sha224_init;
+ case DRV_HASH_SHA256:
+ return sha256_init;
+ case DRV_HASH_SHA384:
+ return sha384_init;
+ case DRV_HASH_SHA512:
+ return sha512_init;
+ default:
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
+ return md5_init;
+ }
+}
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ *	       MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest in SRAM
+ */
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
+{
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+ struct device *dev = drvdata_to_dev(_drvdata);
+
+ switch (mode) {
+ case DRV_HASH_NULL:
+ break; /*Ignore*/
+ case DRV_HASH_MD5:
+ return (hash_handle->larval_digest_sram_addr);
+ case DRV_HASH_SHA1:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init));
+ case DRV_HASH_SHA224:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init));
+ case DRV_HASH_SHA256:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init));
+ case DRV_HASH_SHA384:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init));
+ case DRV_HASH_SHA512:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init) +
+ sizeof(sha384_init));
+ default:
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
+ }
+
+	/* Invalid mode: return a valid address anyway to avoid a crash */
+ return hash_handle->larval_digest_sram_addr;
+}
+
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode)
+{
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
+ struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
+ cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+
+ switch (mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA224:
+ case DRV_HASH_SHA256:
+ case DRV_HASH_MD5:
+ return digest_len_addr;
+#if (CC_DEV_SHA_MAX > 256)
+ case DRV_HASH_SHA384:
+ case DRV_HASH_SHA512:
+ return digest_len_addr + sizeof(digest_len_init);
+#endif
+ default:
+		return digest_len_addr; /* unknown mode: avoid a crash */
+ }
+}
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
new file mode 100644
index 000000000000..2e5bf8b0bbb6
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_hash.h
+ * ARM CryptoCell Hash Crypto API
+ */
+
+#ifndef __CC_HASH_H__
+#define __CC_HASH_H__
+
+#include "cc_buffer_mgr.h"
+
+#define HMAC_IPAD_CONST 0x36363636
+#define HMAC_OPAD_CONST 0x5C5C5C5C
+#define HASH_LEN_SIZE_712 16
+#define HASH_LEN_SIZE_630 8
+#define HASH_MAX_LEN_SIZE HASH_LEN_SIZE_712
+#define CC_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CC_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
+
+#define XCBC_MAC_K1_OFFSET 0
+#define XCBC_MAC_K2_OFFSET 16
+#define XCBC_MAC_K3_OFFSET 32
+
+#define CC_EXPORT_MAGIC 0xC2EE1070U
+
+/* this struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and it is used
+ * for xcbc/cmac statesize
+ */
+struct aeshash_state {
+ u8 state[AES_BLOCK_SIZE];
+ unsigned int count;
+ u8 buffer[AES_BLOCK_SIZE];
+};
+
+/* ahash state */
+struct ahash_req_ctx {
+ u8 buffers[2][CC_MAX_HASH_BLCK_SIZE] ____cacheline_aligned;
+ u8 digest_result_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 opad_digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ u8 digest_bytes_len[HASH_MAX_LEN_SIZE] ____cacheline_aligned;
+ struct async_gen_req_ctx gen_ctx ____cacheline_aligned;
+ enum cc_req_dma_buf_type data_dma_buf_type;
+ dma_addr_t opad_digest_dma_addr;
+ dma_addr_t digest_buff_dma_addr;
+ dma_addr_t digest_bytes_len_dma_addr;
+ dma_addr_t digest_result_dma_addr;
+ u32 buf_cnt[2];
+ u32 buff_index;
+	u32 xcbc_count; /* count xcbc update operations */
+ struct scatterlist buff_sg[2];
+ struct scatterlist *curr_sg;
+ u32 in_nents;
+ u32 mlli_nents;
+ struct mlli_params mlli_params;
+};
+
+static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index];
+}
+
+static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index];
+}
+
+static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
+{
+ return &state->buf_cnt[state->buff_index ^ 1];
+}
+
+static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
+{
+ return state->buffers[state->buff_index ^ 1];
+}
+
+int cc_hash_alloc(struct cc_drvdata *drvdata);
+int cc_init_hash_sram(struct cc_drvdata *drvdata);
+int cc_hash_free(struct cc_drvdata *drvdata);
+
+/*!
+ * Gets the SRAM address of the initial digest length value
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 returns the address of the initial digest length in SRAM
+ */
+cc_sram_addr_t
+cc_digest_len_addr(void *drvdata, u32 mode);
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes:
+ * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return u32 The address of the initial digest in SRAM
+ */
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
+
+void cc_hash_global_init(void);
+
+#endif /*__CC_HASH_H__*/
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
new file mode 100644
index 000000000000..f51001898ca1
--- /dev/null
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_HOST_H__
+#define __CC_HOST_H__
+
+// --------------------------------------
+// BLOCK: HOST_P
+// --------------------------------------
+#define CC_HOST_IRR_REG_OFFSET 0xA00UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
+#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
+#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
+#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
+#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
+#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
+#define CC_HOST_IMR_REG_OFFSET 0xA04UL
+#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
+#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
+#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
+#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
+#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
+#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
+#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_REG_OFFSET 0xA08UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
+#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
+#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL
+#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL
+#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
+#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_HOST_SIGNATURE_REG_OFFSET 0xA24UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_BOOT_REG_OFFSET 0xA28UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
+#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
+#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL
+#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL
+#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL
+#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL
+#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL
+#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL
+#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL
+#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL
+#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL
+#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL
+#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL
+#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL
+#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL
+#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL
+#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL
+#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL
+#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL
+#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL
+#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL
+#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL
+#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL
+#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL
+#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL
+#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL
+#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL
+#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
+#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define CC_HOST_VERSION_REG_OFFSET 0xA40UL
+#define CC_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
+#define CC_HOST_GPR0_REG_OFFSET 0xA70UL
+#define CC_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
+#define CC_GPR_HOST_REG_OFFSET 0xA74UL
+#define CC_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
+#define CC_GPR_HOST_VALUE_BIT_SIZE 0x20UL
+#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
+#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
+// --------------------------------------
+// BLOCK: HOST_SRAM
+// --------------------------------------
+#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
+#define CC_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
+#define CC_SRAM_ADDR_REG_OFFSET 0xF04UL
+#define CC_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
+#define CC_SRAM_DATA_READY_REG_OFFSET 0xF08UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
+#define CC_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL
+
+#endif //__CC_HOST_H__
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
new file mode 100644
index 000000000000..a091ae57f902
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -0,0 +1,576 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_HW_QUEUE_DEFS_H__
+#define __CC_HW_QUEUE_DEFS_H__
+
+#include <linux/types.h>
+
+#include "cc_kernel_regs.h"
+#include <linux/bitfield.h>
+
+/******************************************************************************
+ * DEFINITIONS
+ ******************************************************************************/
+
+#define HW_DESC_SIZE_WORDS 6
+/* Define max. available slots in HW queue */
+#define HW_QUEUE_SLOTS_MAX 15
+
+#define CC_REG_LOW(word, name) \
+ (CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
+
+#define CC_REG_HIGH(word, name) \
+ (CC_REG_LOW(word, name) + \
+ CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
+
+#define CC_GENMASK(word, name) \
+ GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
+
+#define WORD0_VALUE CC_GENMASK(0, VALUE)
+#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
+#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
+#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
+#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
+#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
+#define WORD2_VALUE CC_GENMASK(2, VALUE)
+#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
+#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
+#define WORD3_DOUT_SIZE CC_GENMASK(3, DOUT_SIZE)
+#define WORD3_HASH_XOR_BIT CC_GENMASK(3, HASH_XOR_BIT)
+#define WORD3_NS_BIT CC_GENMASK(3, NS_BIT)
+#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND)
+#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED)
+#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH)
+#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP)
+#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0)
+#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1)
+#define WORD4_CIPHER_CONF2 CC_GENMASK(4, CIPHER_CONF2)
+#define WORD4_CIPHER_DO CC_GENMASK(4, CIPHER_DO)
+#define WORD4_CIPHER_MODE CC_GENMASK(4, CIPHER_MODE)
+#define WORD4_CMAC_SIZE0 CC_GENMASK(4, CMAC_SIZE0)
+#define WORD4_DATA_FLOW_MODE CC_GENMASK(4, DATA_FLOW_MODE)
+#define WORD4_KEY_SIZE CC_GENMASK(4, KEY_SIZE)
+#define WORD4_SETUP_OPERATION CC_GENMASK(4, SETUP_OPERATION)
+#define WORD5_DIN_ADDR_HIGH CC_GENMASK(5, DIN_ADDR_HIGH)
+#define WORD5_DOUT_ADDR_HIGH CC_GENMASK(5, DOUT_ADDR_HIGH)
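+
+/*
+ * Worked example (for illustration): WORD1_DIN_SIZE expands, via the
+ * CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_* definitions in cc_kernel_regs.h
+ * (shift 0x2, size 0x18), to GENMASK(0x2 + 0x18 - 1, 0x2) = GENMASK(25, 2),
+ * i.e. a mask over bits 2..25 of descriptor word 1. The setters below then
+ * use FIELD_PREP() against these masks instead of hand-written shifts.
+ */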
+
+/******************************************************************************
+ * TYPE DEFINITIONS
+ ******************************************************************************/
+
+struct cc_hw_desc {
+ union {
+ u32 word[HW_DESC_SIZE_WORDS];
+ u16 hword[HW_DESC_SIZE_WORDS * 2];
+ };
+};
+
+enum cc_axi_sec {
+ AXI_SECURE = 0,
+ AXI_NOT_SECURE = 1
+};
+
+enum cc_desc_direction {
+ DESC_DIRECTION_ILLEGAL = -1,
+ DESC_DIRECTION_ENCRYPT_ENCRYPT = 0,
+ DESC_DIRECTION_DECRYPT_DECRYPT = 1,
+ DESC_DIRECTION_DECRYPT_ENCRYPT = 3,
+ DESC_DIRECTION_END = S32_MAX,
+};
+
+enum cc_dma_mode {
+ DMA_MODE_NULL = -1,
+ NO_DMA = 0,
+ DMA_SRAM = 1,
+ DMA_DLLI = 2,
+ DMA_MLLI = 3,
+ DMA_MODE_END = S32_MAX,
+};
+
+enum cc_flow_mode {
+ FLOW_MODE_NULL = -1,
+ /* data flows */
+ BYPASS = 0,
+ DIN_AES_DOUT = 1,
+ AES_to_HASH = 2,
+ AES_and_HASH = 3,
+ DIN_DES_DOUT = 4,
+ DES_to_HASH = 5,
+ DES_and_HASH = 6,
+ DIN_HASH = 7,
+ DIN_HASH_and_BYPASS = 8,
+ AESMAC_and_BYPASS = 9,
+ AES_to_HASH_and_DOUT = 10,
+ DIN_RC4_DOUT = 11,
+ DES_to_HASH_and_DOUT = 12,
+ AES_to_AES_to_HASH_and_DOUT = 13,
+ AES_to_AES_to_HASH = 14,
+ AES_to_HASH_and_AES = 15,
+ DIN_AES_AESMAC = 17,
+ HASH_to_DOUT = 18,
+ /* setup flows */
+ S_DIN_to_AES = 32,
+ S_DIN_to_AES2 = 33,
+ S_DIN_to_DES = 34,
+ S_DIN_to_RC4 = 35,
+ S_DIN_to_HASH = 37,
+ S_AES_to_DOUT = 38,
+ S_AES2_to_DOUT = 39,
+ S_RC4_to_DOUT = 41,
+ S_DES_to_DOUT = 42,
+ S_HASH_to_DOUT = 43,
+ SET_FLOW_ID = 44,
+ FLOW_MODE_END = S32_MAX,
+};
+
+enum cc_setup_op {
+ SETUP_LOAD_NOP = 0,
+ SETUP_LOAD_STATE0 = 1,
+ SETUP_LOAD_STATE1 = 2,
+ SETUP_LOAD_STATE2 = 3,
+ SETUP_LOAD_KEY0 = 4,
+ SETUP_LOAD_XEX_KEY = 5,
+ SETUP_WRITE_STATE0 = 8,
+ SETUP_WRITE_STATE1 = 9,
+ SETUP_WRITE_STATE2 = 10,
+ SETUP_WRITE_STATE3 = 11,
+ SETUP_OP_END = S32_MAX,
+};
+
+enum cc_hash_conf_pad {
+ HASH_PADDING_DISABLED = 0,
+ HASH_PADDING_ENABLED = 1,
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
+ HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
+};
+
+enum cc_aes_mac_selector {
+ AES_SK = 1,
+ AES_CMAC_INIT = 2,
+ AES_CMAC_SIZE0 = 3,
+ AES_MAC_END = S32_MAX,
+};
+
+#define HW_KEY_MASK_CIPHER_DO 0x3
+#define HW_KEY_SHIFT_CIPHER_CFG2 2
+
+/* HwCryptoKey[1:0] is mapped to cipher_do[1:0] */
+/* HwCryptoKey[3:2] is mapped to cipher_config2[1:0] */
+enum cc_hw_crypto_key {
+ USER_KEY = 0, /* 0x0000 */
+ ROOT_KEY = 1, /* 0x0001 */
+ PROVISIONING_KEY = 2, /* 0x0010 */ /* ==KCP */
+ SESSION_KEY = 3, /* 0x0011 */
+ RESERVED_KEY = 4, /* NA */
+ PLATFORM_KEY = 5, /* 0x0101 */
+ CUSTOMER_KEY = 6, /* 0x0110 */
+ KFDE0_KEY = 7, /* 0x0111 */
+ KFDE1_KEY = 9, /* 0x1001 */
+ KFDE2_KEY = 10, /* 0x1010 */
+ KFDE3_KEY = 11, /* 0x1011 */
+ END_OF_KEYS = S32_MAX,
+};
+
+enum cc_hw_aes_key_size {
+ AES_128_KEY = 0,
+ AES_192_KEY = 1,
+ AES_256_KEY = 2,
+ END_OF_AES_KEYS = S32_MAX,
+};
+
+enum cc_hash_cipher_pad {
+ DO_NOT_PAD = 0,
+ DO_PAD = 1,
+ HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
+};
+
+/*****************************/
+/* Descriptor packing macros */
+/*****************************/
+
+/*
+ * Init a HW descriptor struct
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void hw_desc_init(struct cc_hw_desc *pdesc)
+{
+ memset(pdesc, 0, sizeof(struct cc_hw_desc));
+}
+
+/*
+ * Indicates the end of the current HW descriptor flow and releases the HW engines.
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_queue_last_ind_bit(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1);
+}
+
+/*
+ * Set the DIN field of a HW descriptor
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: dinAdr DIN address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_din_type(struct cc_hw_desc *pdesc,
+ enum cc_dma_mode dma_mode, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec)
+{
+ pdesc->word[0] = (u32)addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, ((u16)(addr >> 32)));
+#endif
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) |
+ FIELD_PREP(WORD1_DIN_SIZE, size) |
+ FIELD_PREP(WORD1_NS_BIT, axi_sec);
+}
+
+/*
+ * Set the DIN field of a HW descriptor to NO DMA mode.
+ * Used for NOP descriptor, register patches and other special modes.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DIN address
+ * @size: Data size in bytes
+ */
+static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+ pdesc->word[0] = addr;
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
+}
+
+/*
+ * Set the DIN field of a HW descriptor to SRAM mode.
+ * Note: No need to check SRAM alignment since host requests do not use SRAM and
+ * adaptor will enforce alignment check.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DIN address
+ * @size: Data size in bytes
+ */
+static inline void set_din_sram(struct cc_hw_desc *pdesc, dma_addr_t addr,
+ u32 size)
+{
+ pdesc->word[0] = (u32)addr;
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) |
+ FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM);
+}
+
+/*
+ * Set the DIN field of a HW descriptor to CONST mode
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @val: DIN const value
+ * @size: Data size in bytes
+ */
+static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size)
+{
+ pdesc->word[0] = val;
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_CONST_VALUE, 1) |
+ FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM) |
+ FIELD_PREP(WORD1_DIN_SIZE, size);
+}
+
+/*
+ * Set the DIN not last input data indicator
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_type(struct cc_hw_desc *pdesc,
+ enum cc_dma_mode dma_mode, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec)
+{
+ pdesc->word[2] = (u32)addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, ((u16)(addr >> 32)));
+#endif
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) |
+ FIELD_PREP(WORD3_DOUT_SIZE, size) |
+ FIELD_PREP(WORD3_NS_BIT, axi_sec);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to DLLI type
+ * The LAST INDICATION is provided by the user
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @last_ind: The last indication bit
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec,
+ u32 last_ind)
+{
+ set_dout_type(pdesc, DMA_DLLI, addr, size, axi_sec);
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to MLLI type
+ * The LAST INDICATION is provided by the user
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @last_ind: The last indication bit
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_mlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
+ u32 size, enum cc_axi_sec axi_sec,
+ bool last_ind)
+{
+ set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec);
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to NO DMA mode.
+ * Used for NOP descriptor, register patches and other special modes.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @write_enable: Enables a write operation to a register
+ */
+static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr,
+ u32 size, bool write_enable)
+{
+ pdesc->word[2] = addr;
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_SIZE, size) |
+ FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable);
+}
+
+/*
+ * Set the word for the XOR operation.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @val: xor data value
+ */
+static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val)
+{
+ pdesc->word[2] = val;
+}
+
+/*
+ * Sets the XOR indicator bit in the descriptor
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_xor_active(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1);
+}
+
+/*
+ * Select the AES engine instead of HASH engine when setting up combined mode
+ * with AES XCBC MAC
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to SRAM mode
+ * Note: No need to check SRAM alignment since host requests do not use SRAM and
+ * adaptor will enforce alignment check.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ */
+static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+ pdesc->word[2] = addr;
+ pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, DMA_SRAM) |
+ FIELD_PREP(WORD3_DOUT_SIZE, size);
+}
+
+/*
+ * Sets the data unit size for XEX mode in data_out_addr[15:0]
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: data unit size for XEX mode
+ */
+static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size)
+{
+ pdesc->word[2] = size;
+}
+
+/*
+ * Set the number of rounds for Multi2 in data_out_addr[15:0]
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @num: number of rounds for Multi2
+ */
+static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num)
+{
+ pdesc->word[2] = num;
+}
+
+/*
+ * Set the flow mode.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_flow_mode(struct cc_hw_desc *pdesc,
+ enum cc_flow_mode mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
+}
+
+/*
+ * Set the cipher mode.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
+ enum drv_cipher_mode mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
+}
+
+/*
+ * Set the cipher configuration fields.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
+ enum drv_crypto_direction mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
+}
+
+/*
+ * Set the cipher configuration fields.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @config: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
+ enum cc_hash_conf_pad config)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config);
+}
+
+/*
+ * Set HW key configuration fields.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @hw_key: The HW key slot as defined in enum cc_hw_crypto_key
+ */
+static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
+ enum cc_hw_crypto_key hw_key)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
+ (hw_key & HW_KEY_MASK_CIPHER_DO)) |
+ FIELD_PREP(WORD4_CIPHER_CONF2,
+ (hw_key >> HW_KEY_SHIFT_CIPHER_CFG2));
+}
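+
+/*
+ * Worked example (for illustration): KFDE3_KEY = 11 = 0b1011, so the low
+ * two bits (0b11) are placed in CIPHER_DO and the remaining high bits
+ * (0b10) in CIPHER_CONF2, matching the HwCryptoKey mapping noted above
+ * enum cc_hw_crypto_key.
+ */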
+
+/*
+ * Set byte order of all setup-finalize descriptors.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @config: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config);
+}
+
+/*
+ * Set CMAC_SIZE0 mode.
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1);
+}
+
+/*
+ * Set key size descriptor field.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size);
+}
+
+/*
+ * Set AES key size.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size)
+{
+ set_key_size(pdesc, ((size >> 3) - 2));
+}
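+
+/*
+ * Worked example (for illustration): (size >> 3) - 2 maps AES key sizes of
+ * 16/24/32 bytes to the hardware codes 0/1/2 (AES_128_KEY/AES_192_KEY/
+ * AES_256_KEY); set_key_size_des() below similarly maps 8/16/24-byte DES
+ * keys to 0/1/2.
+ */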
+
+/*
+ * Set DES key size.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size)
+{
+ set_key_size(pdesc, ((size >> 3) - 1));
+}
+
+/*
+ * Set the descriptor setup mode
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode: Any one of the setup modes defined in [CC7x-DESC]
+ */
+static inline void set_setup_mode(struct cc_hw_desc *pdesc,
+ enum cc_setup_op mode)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode);
+}
+
+/*
+ * Set the descriptor cipher DO
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @config: Any one of the cipher do defined in [CC7x-DESC]
+ */
+static inline void set_cipher_do(struct cc_hw_desc *pdesc,
+ enum cc_hash_cipher_pad config)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
+ (config & HW_KEY_MASK_CIPHER_DO));
+}
+
+#endif /*__CC_HW_QUEUE_DEFS_H__*/
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
new file mode 100644
index 000000000000..769458323394
--- /dev/null
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <crypto/ctr.h>
+#include "cc_driver.h"
+#include "cc_ivgen.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_buffer_mgr.h"
+
+/* The max. size of pool *MUST* be <= SRAM total size */
+#define CC_IVPOOL_SIZE 1024
+/* The first 32 B of the pool are dedicated to the next
+ * encryption "key" & "IV" used for pool regeneration
+ */
+#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
+#define CC_IVPOOL_GEN_SEQ_LEN 4
+
+/**
+ * struct cc_ivgen_ctx - IV pool generation context
+ * @pool: the start address of the iv-pool in internal RAM
+ * @ctr_key: address of pool's encryption key material in internal RAM
+ * @ctr_iv: address of pool's counter iv in internal RAM
+ * @next_iv_ofs: the offset to the next available IV in pool
+ * @pool_meta: virt. address of the initial enc. key/IV
+ * @pool_meta_dma: phys. address of the initial enc. key/IV
+ */
+struct cc_ivgen_ctx {
+ cc_sram_addr_t pool;
+ cc_sram_addr_t ctr_key;
+ cc_sram_addr_t ctr_iv;
+ u32 next_iv_ofs;
+ u8 *pool_meta;
+ dma_addr_t pool_meta_dma;
+};
+
+/*!
+ * Generates CC_IVPOOL_SIZE of random bytes by
+ * encrypting 0's using AES128-CTR.
+ *
+ * \param ivgen iv-pool context
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ */
+static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
+{
+ unsigned int idx = *iv_seq_len;
+
+ if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
+ /* The sequence will be longer than allowed */
+ return -EINVAL;
+ }
+ /* Setup key */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
+ set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
+ set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
+ set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
+ idx++;
+
+ /* Setup cipher state */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
+ set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
+ set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
+ set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+ set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
+ idx++;
+
+ /* Perform dummy encrypt to skip first block */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
+ set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
+ set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Generate IV pool */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
+ set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
+ set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
+ idx++;
+
+ *iv_seq_len = idx; /* Update sequence length */
+
+ /* queue ordering assures pool readiness */
+ ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;
+
+ return 0;
+}
+
+/*!
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming the driver.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_init_iv_sram(struct cc_drvdata *drvdata)
+{
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
+ unsigned int iv_seq_len = 0;
+ int rc;
+
+ /* Generate initial enc. key/iv */
+ get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);
+
+ /* The first 32B reserved for the enc. Key/IV */
+ ivgen_ctx->ctr_key = ivgen_ctx->pool;
+ ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
+
+ /* Copy initial enc. key and IV to SRAM at a single descriptor */
+ hw_desc_init(&iv_seq[iv_seq_len]);
+ set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
+ CC_IVPOOL_META_SIZE, NS_BIT);
+ set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
+ CC_IVPOOL_META_SIZE);
+ set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
+ iv_seq_len++;
+
+ /* Generate initial pool */
+ rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
+ if (rc)
+ return rc;
+
+ /* Fire-and-forget */
+ return send_request_init(drvdata, iv_seq, iv_seq_len);
+}
+
+/*!
+ * Free iv-pool and ivgen context.
+ *
+ * \param drvdata
+ */
+void cc_ivgen_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct device *device = &drvdata->plat_dev->dev;
+
+ if (!ivgen_ctx)
+ return;
+
+ if (ivgen_ctx->pool_meta) {
+ memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
+ dma_free_coherent(device, CC_IVPOOL_META_SIZE,
+ ivgen_ctx->pool_meta,
+ ivgen_ctx->pool_meta_dma);
+ }
+
+ ivgen_ctx->pool = NULL_SRAM_ADDR;
+
+ /* release "this" context */
+ kfree(ivgen_ctx);
+}
+
+/*!
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
+ * \param drvdata Driver's private context
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_ivgen_init(struct cc_drvdata *drvdata)
+{
+ struct cc_ivgen_ctx *ivgen_ctx;
+ struct device *device = &drvdata->plat_dev->dev;
+ int rc;
+
+ /* Allocate "this" context */
+ ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+ if (!ivgen_ctx)
+ return -ENOMEM;
+
+ /* Allocate pool's header for initial enc. key/IV */
+ ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
+ &ivgen_ctx->pool_meta_dma,
+ GFP_KERNEL);
+ if (!ivgen_ctx->pool_meta) {
+ dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
+ CC_IVPOOL_META_SIZE);
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* Allocate IV pool in SRAM */
+ ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
+ if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
+ dev_err(device, "SRAM pool exhausted\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ drvdata->ivgen_handle = ivgen_ctx;
+
+ return cc_init_iv_sram(drvdata);
+
+out:
+ cc_ivgen_fini(drvdata);
+ return rc;
+}
+
+/*!
+ * Acquires 16 Bytes IV from the iv-pool
+ *
+ * \param drvdata Driver private context
+ * \param iv_out_dma Array of physical IV out addresses
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements
+ *                       of iv_out_dma array are ignored)
+ * \param iv_out_size May be 8 or 16 bytes long
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
+ unsigned int iv_out_dma_len, unsigned int iv_out_size,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
+{
+ struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ unsigned int idx = *iv_seq_len;
+ struct device *dev = drvdata_to_dev(drvdata);
+ unsigned int t;
+
+ if (iv_out_size != CC_AES_IV_SIZE &&
+ iv_out_size != CTR_RFC3686_IV_SIZE) {
+ return -EINVAL;
+ }
+ if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
+ /* The sequence will be longer than allowed */
+ return -EINVAL;
+ }
+
+	/* Check that the number of IVs to generate does not exceed the
+	 * number of DMA output addresses we can handle
+	 */
+ if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
+ /* The sequence will be longer than allowed */
+ return -EINVAL;
+ }
+
+ for (t = 0; t < iv_out_dma_len; t++) {
+ /* Acquire IV from pool */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
+ ivgen_ctx->next_iv_ofs),
+ iv_out_size);
+ set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
+ NS_BIT, 0);
+ set_flow_mode(&iv_seq[idx], BYPASS);
+ idx++;
+ }
+
+	/* The bypass (IV copy) operation is followed by the crypto sequence,
+	 * so ensure the bypass write transaction completes by issuing a
+	 * dummy descriptor that acts as a memory barrier
+	 */
+ hw_desc_init(&iv_seq[idx]);
+ set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
+ set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
+ idx++;
+
+ *iv_seq_len = idx; /* update seq length */
+
+ /* Update iv index */
+ ivgen_ctx->next_iv_ofs += iv_out_size;
+
+ if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
+ dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
+ /* pool is drained -regenerate it! */
+ return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
+ }
+
+ return 0;
+}
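+
+/*
+ * Hypothetical usage sketch (not part of this patch): a caller needing one
+ * generated IV for a request could extend its descriptor sequence as shown
+ * below. The function and variable names here are invented for
+ * illustration; the real callers live in the cipher and request-manager
+ * code.
+ */
+#if 0	/* illustration only, never compiled */
+static int example_add_iv(struct cc_drvdata *drvdata, dma_addr_t iv_dma,
+			  struct cc_hw_desc seq[], unsigned int *seq_len)
+{
+	dma_addr_t iv_addrs[1] = { iv_dma };
+
+	/* Appends the pool-read (and, if drained, regeneration) descriptors */
+	return cc_get_iv(drvdata, iv_addrs, 1, CC_AES_IV_SIZE, seq, seq_len);
+}
+#endif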
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
new file mode 100644
index 000000000000..b6ac16903dda
--- /dev/null
+++ b/drivers/crypto/ccree/cc_ivgen.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_IVGEN_H__
+#define __CC_IVGEN_H__
+
+#include "cc_hw_queue_defs.h"
+
+#define CC_IVPOOL_SEQ_LEN 8
+
+/*!
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
+ * \param drvdata Driver's private context
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_ivgen_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Free iv-pool and ivgen context.
+ *
+ * \param drvdata
+ */
+void cc_ivgen_fini(struct cc_drvdata *drvdata);
+
+/*!
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming the driver.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_init_iv_sram(struct cc_drvdata *drvdata);
+
+/*!
+ * Acquires 16 Bytes IV from the iv-pool
+ *
+ * \param drvdata Driver private context
+ * \param iv_out_dma Array of physical IV out addresses
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements of
+ *                       iv_out_dma array are ignored)
+ * \param iv_out_size May be 8 or 16 bytes long
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
+ unsigned int iv_out_dma_len, unsigned int iv_out_size,
+ struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);
+
+#endif /*__CC_IVGEN_H__*/
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
new file mode 100644
index 000000000000..8d7262a35156
--- /dev/null
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_CRYS_KERNEL_H__
+#define __CC_CRYS_KERNEL_H__
+
+// --------------------------------------
+// BLOCK: DSCRPTR
+// --------------------------------------
+#define CC_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
+#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
+#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
+#define CC_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 0x1AUL
+#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 0x1BUL
+#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 0x7UL
+#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 0x8UL
+#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 0xAUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 0xEUL
+#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 0xFUL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 0x11UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 0x13UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 0x14UL
+#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 0x16UL
+#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 0x2UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 0x18UL
+#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 0x4UL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 0x1CUL
+#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 0x1DUL
+#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 0x1EUL
+#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
+#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
+#define CC_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
+#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
+#define CC_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
+#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
+// --------------------------------------
+// BLOCK: AXI_P
+// --------------------------------------
+#define CC_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
+#define CC_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
+#define CC_AXIM_MON_COMP_REG_OFFSET 0xB80UL
+#define CC_AXIM_MON_COMP8_REG_OFFSET 0xBA0UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
+#define CC_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
+#define CC_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
+#define CC_AXIM_MON_ERR_BID_BIT_SIZE 0x4UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SHIFT 0x10UL
+#define CC_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
+#define CC_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
+#define CC_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
+#define CC_AXIM_CFG_REG_OFFSET 0xBE8UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
+#define CC_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
+#define CC_AXIM_CFG_RRESPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SHIFT 0x6UL
+#define CC_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
+#define CC_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
+#define CC_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
+#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
+#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 0x4UL
+#define CC_AXIM_ACE_CONST_ARBAR_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 0x6UL
+#define CC_AXIM_ACE_CONST_AWBAR_BIT_SIZE 0x2UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 0x8UL
+#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 0x4UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 0xCUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 0x3UL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 0xFUL
+#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 0x3UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 0x12UL
+#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
+#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL
+#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 0x4UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 0x8UL
+#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 0x4UL
+#endif // __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
new file mode 100644
index 000000000000..64b15ac9f1d3
--- /dev/null
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef _CC_LLI_DEFS_H_
+#define _CC_LLI_DEFS_H_
+
+#include <linux/types.h>
+
+/* Max DLLI size
+ * AKA CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
+ */
+#define DLLI_SIZE_BIT_SIZE 0x18
+
+#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
+
+#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
+#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
+#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
+ (2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)
+
+/* Size of entry */
+#define LLI_ENTRY_WORD_SIZE 2
+#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(u32))
+
+/* Word0[31:0] = ADDR[31:0] */
+#define LLI_WORD0_OFFSET 0
+#define LLI_LADDR_BIT_OFFSET 0
+#define LLI_LADDR_BIT_SIZE 32
+/* Word1[31:16] = ADDR[47:32]; Word1[15:0] = SIZE */
+#define LLI_WORD1_OFFSET 1
+#define LLI_SIZE_BIT_OFFSET 0
+#define LLI_SIZE_BIT_SIZE 16
+#define LLI_HADDR_BIT_OFFSET 16
+#define LLI_HADDR_BIT_SIZE 16
+
+#define LLI_SIZE_MASK GENMASK((LLI_SIZE_BIT_SIZE - 1), LLI_SIZE_BIT_OFFSET)
+#define LLI_HADDR_MASK GENMASK( \
+ (LLI_HADDR_BIT_OFFSET + LLI_HADDR_BIT_SIZE - 1),\
+ LLI_HADDR_BIT_OFFSET)
+
+static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr)
+{
+ lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK;
+ lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32));
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+}
+
+static inline void cc_lli_set_size(u32 *lli_p, u16 size)
+{
+ lli_p[LLI_WORD1_OFFSET] &= ~LLI_SIZE_MASK;
+ lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_SIZE_MASK, size);
+}
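+
+/*
+ * Example only - an illustrative helper (not used by the driver) showing
+ * how a complete two-word LLI entry is filled with the accessors above.
+ */
+static inline void cc_lli_set_entry_example(u32 *lli_p, dma_addr_t addr,
+ u16 size)
+{
+ lli_p[LLI_WORD1_OFFSET] = 0; /* start from a clean word1 */
+ cc_lli_set_addr(lli_p, addr); /* word0 plus the high address bits */
+ cc_lli_set_size(lli_p, size); /* low 16 bits of word1 */
+}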
+
+#endif /*_CC_LLI_DEFS_H_*/
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
new file mode 100644
index 000000000000..d990f472e89f
--- /dev/null
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_sram_mgr.h"
+#include "cc_ivgen.h"
+#include "cc_hash.h"
+#include "cc_pm.h"
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+const struct dev_pm_ops ccree_pm = {
+ SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
+};
+
+int cc_pm_suspend(struct device *dev)
+{
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+ int rc;
+
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
+ rc = cc_suspend_req_queue(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
+ return rc;
+ }
+ fini_cc_regs(drvdata);
+ cc_clk_off(drvdata);
+ return 0;
+}
+
+int cc_pm_resume(struct device *dev)
+{
+ int rc;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
+
+ rc = cc_clk_on(drvdata);
+ if (rc) {
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
+ return rc;
+ }
+
+ rc = init_cc_regs(drvdata, false);
+ if (rc) {
+ dev_err(dev, "init_cc_regs (%x)\n", rc);
+ return rc;
+ }
+
+ rc = cc_resume_req_queue(drvdata);
+ if (rc) {
+ dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
+ return rc;
+ }
+
+ /* Must be called after resuming the request queue, as it uses the HW queue */
+ cc_init_hash_sram(drvdata);
+
+ cc_init_iv_sram(drvdata);
+ return 0;
+}
+
+int cc_pm_get(struct device *dev)
+{
+ int rc = 0;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (cc_req_queue_suspended(drvdata))
+ rc = pm_runtime_get_sync(dev);
+ else
+ pm_runtime_get_noresume(dev);
+
+ return rc;
+}
+
+int cc_pm_put_suspend(struct device *dev)
+{
+ int rc = 0;
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (!cc_req_queue_suspended(drvdata)) {
+ pm_runtime_mark_last_busy(dev);
+ rc = pm_runtime_put_autosuspend(dev);
+ } else {
+ /* Something is wrong: the queue is already suspended */
+ dev_err(dev, "request to suspend already suspended queue");
+ rc = -EBUSY;
+ }
+ return rc;
+}
+
+int cc_pm_init(struct cc_drvdata *drvdata)
+{
+ int rc = 0;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /* Must be set before enabling runtime PM to avoid a redundant suspend */
+ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ /* activate the PM module */
+ rc = pm_runtime_set_active(dev);
+ if (rc)
+ return rc;
+ /* enable the PM module */
+ pm_runtime_enable(dev);
+
+ return rc;
+}
+
+void cc_pm_fini(struct cc_drvdata *drvdata)
+{
+ pm_runtime_disable(drvdata_to_dev(drvdata));
+}
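+
+/*
+ * Example only - the expected usage pattern, as used by the request
+ * manager, is to bracket each HW submission with a get/put pair so that
+ * runtime PM keeps the engine powered while requests are in flight:
+ *
+ *	rc = cc_pm_get(dev);
+ *	if (rc)
+ *		return rc;
+ *	... push descriptors to the HW queue ...
+ *	then, from the completion path (or on a submission error):
+ *	cc_pm_put_suspend(dev);
+ */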
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
new file mode 100644
index 000000000000..020a5403c58b
--- /dev/null
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_pm.h
+ */
+
+#ifndef __CC_POWER_MGR_H__
+#define __CC_POWER_MGR_H__
+
+#include "cc_driver.h"
+
+#define CC_SUSPEND_TIMEOUT 3000
+
+#if defined(CONFIG_PM)
+
+extern const struct dev_pm_ops ccree_pm;
+
+int cc_pm_init(struct cc_drvdata *drvdata);
+void cc_pm_fini(struct cc_drvdata *drvdata);
+int cc_pm_suspend(struct device *dev);
+int cc_pm_resume(struct device *dev);
+int cc_pm_get(struct device *dev);
+int cc_pm_put_suspend(struct device *dev);
+
+#else
+
+static inline int cc_pm_init(struct cc_drvdata *drvdata)
+{
+ return 0;
+}
+
+static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
+
+static inline int cc_pm_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_resume(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_get(struct device *dev)
+{
+ return 0;
+}
+
+static inline int cc_pm_put_suspend(struct device *dev)
+{
+ return 0;
+}
+
+#endif
+
+#endif /*__CC_POWER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
new file mode 100644
index 000000000000..83a8aaae61c7
--- /dev/null
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+#include "cc_request_mgr.h"
+#include "cc_ivgen.h"
+#include "cc_pm.h"
+
+#define CC_MAX_POLL_ITER 10
+/* The highest descriptor sequence length in use */
+#define CC_MAX_DESC_SEQ_LEN 23
+
+struct cc_req_mgr_handle {
+ /* Request manager resources */
+ unsigned int hw_queue_size; /* HW capability */
+ unsigned int min_free_hw_slots;
+ unsigned int max_used_sw_slots;
+ struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
+ u32 req_queue_head;
+ u32 req_queue_tail;
+ u32 axi_completed;
+ u32 q_free_slots;
+ /* This lock protects access to the HW registers, which must be
+ * accessed by a single request at a time
+ */
+ spinlock_t hw_lock;
+ struct cc_hw_desc compl_desc;
+ u8 *dummy_comp_buff;
+ dma_addr_t dummy_comp_buff_dma;
+
+ /* backlog queue */
+ struct list_head backlog;
+ unsigned int bl_len;
+ spinlock_t bl_lock; /* protect backlog queue */
+
+#ifdef COMP_IN_WQ
+ struct workqueue_struct *workq;
+ struct delayed_work compwork;
+#else
+ struct tasklet_struct comptask;
+#endif
+ bool is_runtime_suspended;
+};
+
+struct cc_bl_item {
+ struct cc_crypto_req creq;
+ struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
+ unsigned int len;
+ struct list_head list;
+ bool notif;
+};
+
+static void comp_handler(unsigned long devarg);
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work);
+#endif
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (!req_mgr_h)
+ return; /* Not allocated */
+
+ if (req_mgr_h->dummy_comp_buff_dma) {
+ dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
+ req_mgr_h->dummy_comp_buff_dma);
+ }
+
+ dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
+ req_mgr_h->min_free_hw_slots));
+ dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
+
+#ifdef COMP_IN_WQ
+ flush_workqueue(req_mgr_h->workq);
+ destroy_workqueue(req_mgr_h->workq);
+#else
+ /* Kill tasklet */
+ tasklet_kill(&req_mgr_h->comptask);
+#endif
+ kzfree(req_mgr_h);
+ drvdata->request_mgr_handle = NULL;
+}
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *req_mgr_h;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc = 0;
+
+ req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
+ if (!req_mgr_h) {
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ drvdata->request_mgr_handle = req_mgr_h;
+
+ spin_lock_init(&req_mgr_h->hw_lock);
+ spin_lock_init(&req_mgr_h->bl_lock);
+ INIT_LIST_HEAD(&req_mgr_h->backlog);
+
+#ifdef COMP_IN_WQ
+ dev_dbg(dev, "Initializing completion workqueue\n");
+ req_mgr_h->workq = create_singlethread_workqueue("ccree");
+ if (!req_mgr_h->workq) {
+ dev_err(dev, "Failed creating work queue\n");
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
+#else
+ dev_dbg(dev, "Initializing completion tasklet\n");
+ tasklet_init(&req_mgr_h->comptask, comp_handler,
+ (unsigned long)drvdata);
+#endif
+ req_mgr_h->hw_queue_size = cc_ioread(drvdata,
+ CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
+ dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
+ if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
+ dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
+ req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
+ req_mgr_h->max_used_sw_slots = 0;
+
+ /* Allocate DMA word for "dummy" completion descriptor use */
+ req_mgr_h->dummy_comp_buff =
+ dma_alloc_coherent(dev, sizeof(u32),
+ &req_mgr_h->dummy_comp_buff_dma,
+ GFP_KERNEL);
+ if (!req_mgr_h->dummy_comp_buff) {
+ dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
+ sizeof(u32));
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ /* Init. "dummy" completion descriptor */
+ hw_desc_init(&req_mgr_h->compl_desc);
+ set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
+ set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
+ sizeof(u32), NS_BIT, 1);
+ set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
+ set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
+
+ return 0;
+
+req_mgr_init_err:
+ cc_req_mgr_fini(drvdata);
+ return rc;
+}
+
+static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
+ unsigned int seq_len)
+{
+ int i, w;
+ void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /*
+ * We do indeed write all 6 command words to the same
+ * register. The HW supports this.
+ */
+
+ for (i = 0; i < seq_len; i++) {
+ for (w = 0; w <= 5; w++)
+ writel_relaxed(seq[i].word[w], reg);
+
+ if (cc_dump_desc)
+ dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i, seq[i].word[0], seq[i].word[1],
+ seq[i].word[2], seq[i].word[3],
+ seq[i].word[4], seq[i].word[5]);
+ }
+}
+
+/*!
+ * Completion will take place if and only if the user requested
+ * completion via cc_send_sync_request().
+ *
+ * \param dev
+ * \param dx_compl_h The completion event to signal
+ */
+static void request_mgr_complete(struct device *dev, void *dx_compl_h,
+ int dummy)
+{
+ struct completion *this_compl = dx_compl_h;
+
+ complete(this_compl);
+}
+
+static int cc_queues_status(struct cc_drvdata *drvdata,
+ struct cc_req_mgr_handle *req_mgr_h,
+ unsigned int total_seq_len)
+{
+ unsigned long poll_queue;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ /* SW queue is checked only once as it will not
+ * be changed during the poll because the spinlock_bh
+ * is held by the thread
+ */
+ if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
+ req_mgr_h->req_queue_tail) {
+ dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+ return -ENOSPC;
+ }
+
+ if (req_mgr_h->q_free_slots >= total_seq_len)
+ return 0;
+
+ /* Wait for space in HW queue. Poll constant num of iterations. */
+ for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+ if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
+ req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
+
+ if (req_mgr_h->q_free_slots >= total_seq_len) {
+ /* If there is enough space, return */
+ return 0;
+ }
+
+ dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->q_free_slots, total_seq_len);
+ }
+ /* No room in the HW queue - try again later */
+ dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
+ req_mgr_h->q_free_slots, total_seq_len);
+ return -ENOSPC;
+}
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ * Need to be called with HW lock held and PM running
+ *
+ * \param drvdata
+ * \param cc_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param add_comp If "true": add an artificial dout DMA to mark completion
+ *
+ * \return int Returns -EINPROGRESS or error code
+ */
+static int cc_do_send_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ bool add_comp, bool ivgen)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ unsigned int used_sw_slots;
+ unsigned int iv_seq_len = 0;
+ unsigned int total_seq_len = len; /*initial sequence length*/
+ struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc;
+
+ if (ivgen) {
+ dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
+ cc_req->ivgen_dma_addr_len,
+ &cc_req->ivgen_dma_addr[0],
+ &cc_req->ivgen_dma_addr[1],
+ &cc_req->ivgen_dma_addr[2],
+ cc_req->ivgen_size);
+
+ /* Acquire IV from pool */
+ rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
+ cc_req->ivgen_dma_addr_len,
+ cc_req->ivgen_size, iv_seq, &iv_seq_len);
+
+ if (rc) {
+ dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
+ return rc;
+ }
+
+ total_seq_len += iv_seq_len;
+ }
+
+ used_sw_slots = ((req_mgr_h->req_queue_head -
+ req_mgr_h->req_queue_tail) &
+ (MAX_REQUEST_QUEUE_SIZE - 1));
+ if (used_sw_slots > req_mgr_h->max_used_sw_slots)
+ req_mgr_h->max_used_sw_slots = used_sw_slots;
+
+ /* Enqueue request - must be called with the HW lock held */
+ req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
+ req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
+ (MAX_REQUEST_QUEUE_SIZE - 1);
+ /* TODO: Use circ_buf.h ? */
+
+ dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
+
+ /*
+ * We are about to push a command to the HW via the command registers
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
+ */
+ wmb();
+
+ /* STAT_PHASE_4: Push sequence */
+ if (ivgen)
+ enqueue_seq(drvdata, iv_seq, iv_seq_len);
+
+ enqueue_seq(drvdata, desc, len);
+
+ if (add_comp) {
+ enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
+ total_seq_len++;
+ }
+
+ if (req_mgr_h->q_free_slots < total_seq_len) {
+ /* This situation should never occur. It may indicate a problem
+ * with power resume. Set the free slot count to 0 and hope
+ * for the best.
+ */
+ dev_err(dev, "HW free slot count mismatch.");
+ req_mgr_h->q_free_slots = 0;
+ } else {
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots -= total_seq_len;
+ }
+
+ /* Operation still in process */
+ return -EINPROGRESS;
+}
+
+static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
+ struct cc_bl_item *bli)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+ spin_lock_bh(&mgr->bl_lock);
+ list_add_tail(&bli->list, &mgr->backlog);
+ ++mgr->bl_len;
+ spin_unlock_bh(&mgr->bl_lock);
+ tasklet_schedule(&mgr->comptask);
+}
+
+static void cc_proc_backlog(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct cc_bl_item *bli;
+ struct cc_crypto_req *creq;
+ struct crypto_async_request *req;
+ bool ivgen;
+ unsigned int total_len;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc;
+
+ spin_lock(&mgr->bl_lock);
+
+ while (mgr->bl_len) {
+ bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ spin_unlock(&mgr->bl_lock);
+
+ creq = &bli->creq;
+ req = (struct crypto_async_request *)creq->user_arg;
+
+ /*
+ * Notify the request we're moving out of the backlog
+ * but only if we haven't done so already.
+ */
+ if (!bli->notif) {
+ req->complete(req, -EINPROGRESS);
+ bli->notif = true;
+ }
+
+ ivgen = !!creq->ivgen_dma_addr_len;
+ total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+
+ spin_lock(&mgr->hw_lock);
+
+ rc = cc_queues_status(drvdata, mgr, total_len);
+ if (rc) {
+ /*
+ * There is still no room in the FIFO for
+ * this request. Bail out. We'll return here
+ * on the next completion irq.
+ */
+ spin_unlock(&mgr->hw_lock);
+ return;
+ }
+
+ rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
+ bli->len, false, ivgen);
+
+ spin_unlock(&mgr->hw_lock);
+
+ if (rc != -EINPROGRESS) {
+ cc_pm_put_suspend(dev);
+ creq->user_cb(dev, req, rc);
+ }
+
+ /* Remove ourselves from the backlog list */
+ spin_lock(&mgr->bl_lock);
+ list_del(&bli->list);
+ --mgr->bl_len;
+ }
+
+ spin_unlock(&mgr->bl_lock);
+}
+
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ struct crypto_async_request *req)
+{
+ int rc;
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ bool ivgen = !!cc_req->ivgen_dma_addr_len;
+ unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+ struct device *dev = drvdata_to_dev(drvdata);
+ bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+ gfp_t flags = cc_gfp_flags(req);
+ struct cc_bl_item *bli;
+
+ rc = cc_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ return rc;
+ }
+
+ spin_lock_bh(&mgr->hw_lock);
+ rc = cc_queues_status(drvdata, mgr, total_len);
+
+#ifdef CC_DEBUG_FORCE_BACKLOG
+ if (backlog_ok)
+ rc = -ENOSPC;
+#endif /* CC_DEBUG_FORCE_BACKLOG */
+
+ if (rc == -ENOSPC && backlog_ok) {
+ spin_unlock_bh(&mgr->hw_lock);
+
+ bli = kmalloc(sizeof(*bli), flags);
+ if (!bli) {
+ cc_pm_put_suspend(dev);
+ return -ENOMEM;
+ }
+
+ memcpy(&bli->creq, cc_req, sizeof(*cc_req));
+ memcpy(&bli->desc, desc, len * sizeof(*desc));
+ bli->len = len;
+ bli->notif = false;
+ cc_enqueue_backlog(drvdata, bli);
+ return -EBUSY;
+ }
+
+ if (!rc)
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
+ ivgen);
+
+ spin_unlock_bh(&mgr->hw_lock);
+ return rc;
+}
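+
+/*
+ * Example only - an async caller is expected to treat -EINPROGRESS as
+ * "submitted" and -EBUSY as "backlogged; it will be re-submitted from
+ * the completion tasklet". Local names and the cleanup helper below are
+ * hypothetical placeholders, not real driver functions:
+ *
+ *	rc = cc_send_request(drvdata, &cc_req, desc, seq_len, &req->base);
+ *	if (rc != -EINPROGRESS && rc != -EBUSY)
+ *		my_unmap_request(dev, req);
+ *	return rc;
+ */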
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+ unsigned int len)
+{
+ int rc;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+ init_completion(&cc_req->seq_compl);
+ cc_req->user_cb = request_mgr_complete;
+ cc_req->user_arg = &cc_req->seq_compl;
+
+ rc = cc_pm_get(dev);
+ if (rc) {
+ dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ return rc;
+ }
+
+ while (true) {
+ spin_lock_bh(&mgr->hw_lock);
+ rc = cc_queues_status(drvdata, mgr, len + 1);
+
+ if (!rc)
+ break;
+
+ spin_unlock_bh(&mgr->hw_lock);
+ if (rc != -EAGAIN) {
+ cc_pm_put_suspend(dev);
+ return rc;
+ }
+ wait_for_completion_interruptible(&drvdata->hw_queue_avail);
+ reinit_completion(&drvdata->hw_queue_avail);
+ }
+
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
+ spin_unlock_bh(&mgr->hw_lock);
+
+ if (rc != -EINPROGRESS) {
+ cc_pm_put_suspend(dev);
+ return rc;
+ }
+
+ wait_for_completion(&cc_req->seq_compl);
+ return 0;
+}
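+
+/*
+ * Example only - synchronous callers (e.g. setkey or init-time loads)
+ * simply block until the sequence completes; desc/seq_len below are
+ * placeholder names:
+ *
+ *	struct cc_crypto_req cc_req = {};
+ *
+ *	rc = cc_send_sync_request(drvdata, &cc_req, desc, seq_len);
+ *	(returns 0 once the HW has executed all descriptors)
+ */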
+
+/*!
+ * Enqueue caller request to crypto hardware during the init process.
+ * Assumes this function is not called in the middle of a flow,
+ * since we set the QUEUE_LAST_IND flag in the last descriptor.
+ *
+ * \param drvdata
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ *
+ * \return int Returns "0" upon success
+ */
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+ unsigned int len)
+{
+ struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ unsigned int total_seq_len = len; /*initial sequence length*/
+ int rc = 0;
+
+ /* Wait for space in the HW and SW FIFOs. Polls for a bounded number
+ * of iterations.
+ */
+ rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
+ if (rc)
+ return rc;
+
+ set_queue_last_ind(drvdata, &desc[(len - 1)]);
+
+ /*
+ * We are about to push a command to the HW via the command registers
+ * that may reference host memory. We need to issue a memory barrier
+ * to make sure there are no outstanding memory writes
+ */
+ wmb();
+ enqueue_seq(drvdata, desc, len);
+
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
+
+ return 0;
+}
+
+void complete_request(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ complete(&drvdata->hw_queue_avail);
+#ifdef COMP_IN_WQ
+ queue_delayed_work(request_mgr_handle->workq,
+ &request_mgr_handle->compwork, 0);
+#else
+ tasklet_schedule(&request_mgr_handle->comptask);
+#endif
+}
+
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work)
+{
+ struct cc_drvdata *drvdata =
+ container_of(work, struct cc_drvdata, compwork.work);
+
+ comp_handler((unsigned long)drvdata);
+}
+#endif
+
+static void proc_completions(struct cc_drvdata *drvdata)
+{
+ struct cc_crypto_req *cc_req;
+ struct device *dev = drvdata_to_dev(drvdata);
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+ unsigned int *tail = &request_mgr_handle->req_queue_tail;
+ unsigned int *head = &request_mgr_handle->req_queue_head;
+
+ while (request_mgr_handle->axi_completed) {
+ request_mgr_handle->axi_completed--;
+
+ /* Dequeue request */
+ if (*head == *tail) {
+ /* We are supposed to handle a completion but our
+ * queue is empty. This is not normal. Return and
+ * hope for the best.
+ */
+ dev_err(dev, "Request queue is empty head == tail %u\n",
+ *head);
+ break;
+ }
+
+ cc_req = &request_mgr_handle->req_queue[*tail];
+
+ if (cc_req->user_cb)
+ cc_req->user_cb(dev, cc_req->user_arg, 0);
+ *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
+ dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
+ dev_dbg(dev, "Request completed. axi_completed=%d\n",
+ request_mgr_handle->axi_completed);
+ cc_pm_put_suspend(dev);
+ }
+}
+
+static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
+{
+ return FIELD_GET(AXIM_MON_COMP_VALUE,
+ cc_ioread(drvdata, drvdata->axim_mon_offset));
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void comp_handler(unsigned long devarg)
+{
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ u32 irq;
+
+ irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+
+ if (irq & CC_COMP_IRQ_MASK) {
+ /* To avoid the interrupt from firing as we unmask it,
+ * we clear it now
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+
+ /* Avoid race with above clear: Test completion counter
+ * once more
+ */
+ request_mgr_handle->axi_completed +=
+ cc_axi_comp_count(drvdata);
+
+ while (request_mgr_handle->axi_completed) {
+ do {
+ proc_completions(drvdata);
+ /* At this point (after proc_completions()),
+ * request_mgr_handle->axi_completed is 0.
+ */
+ request_mgr_handle->axi_completed =
+ cc_axi_comp_count(drvdata);
+ } while (request_mgr_handle->axi_completed > 0);
+
+ cc_iowrite(drvdata, CC_REG(HOST_ICR),
+ CC_COMP_IRQ_MASK);
+
+ request_mgr_handle->axi_completed +=
+ cc_axi_comp_count(drvdata);
+ }
+ }
+ /* after verifying that there is nothing to do,
+ * unmask AXI completion interrupt
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR),
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+
+ cc_proc_backlog(drvdata);
+}
+
+/*
+ * Resume the queue configuration; the HW lock is taken here to
+ * synchronize with request submission.
+ */
+#if defined(CONFIG_PM)
+int cc_resume_req_queue(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ spin_lock_bh(&request_mgr_handle->hw_lock);
+ request_mgr_handle->is_runtime_suspended = false;
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+ return 0;
+}
+
+/*
+ * Suspend the queue configuration. Since it is used for runtime suspend
+ * only, just verify that the queue can be suspended.
+ */
+int cc_suspend_req_queue(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ /* lock the send_request */
+ spin_lock_bh(&request_mgr_handle->hw_lock);
+ if (request_mgr_handle->req_queue_head !=
+ request_mgr_handle->req_queue_tail) {
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+ return -EBUSY;
+ }
+ request_mgr_handle->is_runtime_suspended = true;
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+ return 0;
+}
+
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ return request_mgr_handle->is_runtime_suspended;
+}
+
+#endif
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
new file mode 100644
index 000000000000..573cb97af085
--- /dev/null
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_request_mgr.h
+ * Request Manager
+ */
+
+#ifndef __REQUEST_MGR_H__
+#define __REQUEST_MGR_H__
+
+#include "cc_hw_queue_defs.h"
+
+int cc_req_mgr_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ *
+ * \param drvdata
+ * \param cc_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param req The asynchronous crypto request backing this operation,
+ *            used for backlog handling and completion notification
+ *
+ * \return int Returns -EINPROGRESS or error
+ */
+int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
+ struct cc_hw_desc *desc, unsigned int len,
+ struct crypto_async_request *req);
+
+int cc_send_sync_request(struct cc_drvdata *drvdata,
+ struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
+ unsigned int len);
+
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
+ unsigned int len);
+
+void complete_request(struct cc_drvdata *drvdata);
+
+void cc_req_mgr_fini(struct cc_drvdata *drvdata);
+
+#if defined(CONFIG_PM)
+int cc_resume_req_queue(struct cc_drvdata *drvdata);
+
+int cc_suspend_req_queue(struct cc_drvdata *drvdata);
+
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
+#endif
+
+#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
new file mode 100644
index 000000000000..c8c276f6dee9
--- /dev/null
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include "cc_driver.h"
+#include "cc_sram_mgr.h"
+
+/**
+ * struct cc_sram_ctx - Internal RAM context manager
+ * @sram_free_offset: the offset to the non-allocated area
+ */
+struct cc_sram_ctx {
+ cc_sram_addr_t sram_free_offset;
+};
+
+/**
+ * cc_sram_mgr_fini() - Cleanup SRAM pool.
+ *
+ * @drvdata: Associated device driver context
+ */
+void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
+{
+ /* Free "this" context */
+ kfree(drvdata->sram_mgr_handle);
+}
+
+/**
+ * cc_sram_mgr_init() - Initializes SRAM pool.
+ * The pool starts right at the beginning of SRAM.
+ * Returns zero for success, negative value otherwise.
+ *
+ * @drvdata: Associated device driver context
+ */
+int cc_sram_mgr_init(struct cc_drvdata *drvdata)
+{
+ struct cc_sram_ctx *ctx;
+ dma_addr_t start = 0;
+ struct device *dev = drvdata_to_dev(drvdata);
+
+ if (drvdata->hw_rev < CC_HW_REV_712) {
+ /* Pool starts after ROM bytes */
+ start = (dma_addr_t)cc_ioread(drvdata,
+ CC_REG(HOST_SEP_SRAM_THRESHOLD));
+
+ if ((start & 0x3) != 0) {
+ dev_err(dev, "Invalid SRAM offset %pad\n", &start);
+ return -EINVAL;
+ }
+ }
+
+ /* Allocate "this" context */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->sram_free_offset = start;
+ drvdata->sram_mgr_handle = ctx;
+
+ return 0;
+}
+
+/*!
+ * Allocate a buffer from the SRAM pool.
+ * Note: the caller is responsible for freeing only the LAST allocated
+ * buffer. This function does not handle any fragmentation that may
+ * occur due to the order of alloc/free calls.
+ *
+ * \param drvdata
+ * \param size The requested bytes to allocate
+ */
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
+{
+ struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
+ cc_sram_addr_t p;
+
+ if ((size & 0x3)) {
+ dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
+ size);
+ return NULL_SRAM_ADDR;
+ }
+ if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
+ dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
+ size, smgr_ctx->sram_free_offset);
+ return NULL_SRAM_ADDR;
+ }
+
+ p = smgr_ctx->sram_free_offset;
+ smgr_ctx->sram_free_offset += size;
+ dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
+ return p;
+}
+
+/**
+ * cc_set_sram_desc() - Create a sequence of constant-value descriptors
+ * that sets the values of the given array into SRAM.
+ * Note: each const value can't exceed word size.
+ *
+ * @src: A pointer to array of words to set as consts.
+ * @dst: The target SRAM buffer to set into
+ * @nelement: The number of words in the "src" array
+ * @seq: A pointer to the given IN/OUT descriptor sequence
+ * @seq_len: A pointer to the given IN/OUT sequence length
+ */
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
+ unsigned int nelement, struct cc_hw_desc *seq,
+ unsigned int *seq_len)
+{
+ u32 i;
+ unsigned int idx = *seq_len;
+
+ for (i = 0; i < nelement; i++, idx++) {
+ hw_desc_init(&seq[idx]);
+ set_din_const(&seq[idx], src[i], sizeof(u32));
+ set_dout_sram(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32));
+ set_flow_mode(&seq[idx], BYPASS);
+ }
+
+ *seq_len = idx;
+}
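+
+/*
+ * Example only - an illustrative sketch (not part of the driver) that
+ * allocates SRAM and builds the descriptor sequence needed to load a
+ * small constant table into it. The sequence would still have to be
+ * pushed through the request manager (e.g. send_request_init()).
+ */
+static inline int cc_sram_load_consts_example(struct cc_drvdata *drvdata,
+ const u32 *consts,
+ unsigned int nwords)
+{
+ struct cc_hw_desc seq[8];
+ unsigned int seq_len = 0;
+ cc_sram_addr_t dst;
+
+ if (nwords > ARRAY_SIZE(seq))
+ return -EINVAL;
+
+ dst = cc_sram_alloc(drvdata, nwords * sizeof(u32));
+ if (dst == NULL_SRAM_ADDR)
+ return -ENOMEM;
+
+ cc_set_sram_desc(consts, dst, nwords, seq, &seq_len);
+
+ /* seq[0..seq_len - 1] is now ready to be enqueued */
+ return 0;
+}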
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
new file mode 100644
index 000000000000..d48649fb3323
--- /dev/null
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#ifndef __CC_SRAM_MGR_H__
+#define __CC_SRAM_MGR_H__
+
+#ifndef CC_CC_SRAM_SIZE
+#define CC_CC_SRAM_SIZE 4096
+#endif
+
+struct cc_drvdata;
+
+/**
+ * Address (offset) within CC internal SRAM
+ */
+
+typedef u64 cc_sram_addr_t;
+
+#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)
+
+/*!
+ * Initializes SRAM pool.
+ * The first X bytes of SRAM are reserved for ROM usage, hence the pool
+ * starts right after those X bytes.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int cc_sram_mgr_init(struct cc_drvdata *drvdata);
+
+/*!
+ * Uninitializes the SRAM pool.
+ *
+ * \param drvdata
+ */
+void cc_sram_mgr_fini(struct cc_drvdata *drvdata);
+
+/*!
+ * Allocate a buffer from the SRAM pool.
+ * Note: the caller is responsible for freeing only the LAST allocated
+ * buffer. This function does not handle any fragmentation that may
+ * occur due to the order of alloc/free calls.
+ *
+ * \param drvdata
+ * \param size The requested bytes to allocate
+ */
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
+
+/**
+ * cc_set_sram_desc() - Create a sequence of constant-value descriptors
+ * that sets the values of the given array into SRAM.
+ * Note: each const value can't exceed word size.
+ *
+ * @src: A pointer to array of words to set as consts.
+ * @dst: The target SRAM buffer to set into
+ * @nelement: The number of words in the "src" array
+ * @seq: A pointer to the given IN/OUT descriptor sequence
+ * @seq_len: A pointer to the given IN/OUT sequence length
+ */
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
+ unsigned int nelement, struct cc_hw_desc *seq,
+ unsigned int *seq_len);
+
+#endif /*__CC_SRAM_MGR_H__*/
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 34a02d690548..59fe6631e73e 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -131,6 +131,11 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
return (skb->len <= SGE_MAX_WR_LEN);
}
+static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
+{
+ memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
+}
+
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
unsigned int entlen,
unsigned int skip)
@@ -160,41 +165,6 @@ static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
return nents;
}
-static inline void chcr_handle_ahash_resp(struct ahash_request *req,
- unsigned char *input,
- int err)
-{
- struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
- int digestsize, updated_digestsize;
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
-
- if (input == NULL)
- goto out;
- digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
- if (reqctx->is_sg_map)
- chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
- if (reqctx->dma_addr)
- dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
- reqctx->dma_len, DMA_TO_DEVICE);
- reqctx->dma_addr = 0;
- updated_digestsize = digestsize;
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- if (reqctx->result == 1) {
- reqctx->result = 0;
- memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
- digestsize);
- } else {
- memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
- updated_digestsize);
- }
-out:
- req->base.complete(&req->base, err);
-}
-
static inline int get_aead_subtype(struct crypto_aead *aead)
{
struct aead_alg *alg = crypto_aead_alg(aead);
@@ -247,34 +217,6 @@ static inline void chcr_handle_aead_resp(struct aead_request *req,
req->base.complete(&req->base, err);
}
-/*
- * chcr_handle_resp - Unmap the DMA buffers associated with the request
- * @req: crypto request
- */
-int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
- int err)
-{
- struct crypto_tfm *tfm = req->tfm;
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct adapter *adap = padap(ctx->dev);
-
- switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AEAD:
- chcr_handle_aead_resp(aead_request_cast(req), input, err);
- break;
-
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
- input, err);
- break;
-
- case CRYPTO_ALG_TYPE_AHASH:
- chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
- }
- atomic_inc(&adap->chcr_stats.complete);
- return err;
-}
-
static void get_aes_decrypt_key(unsigned char *dec_key,
const unsigned char *key,
unsigned int keylength)
@@ -563,7 +505,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
if (!len)
return;
-
while (sg && skip) {
if (sg_dma_len(sg) <= skip) {
skip -= sg_dma_len(sg);
@@ -653,6 +594,35 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
}
return 0;
}
+
+static int chcr_hash_ent_in_wr(struct scatterlist *src,
+ unsigned int minsg,
+ unsigned int space,
+ unsigned int srcskip)
+{
+ int srclen = 0;
+ int srcsg = minsg;
+ int soffset = 0, sless;
+
+ if (sg_dma_len(src) == srcskip) {
+ src = sg_next(src);
+ srcskip = 0;
+ }
+ while (src && space > (sgl_ent_len[srcsg + 1])) {
+ sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
+ CHCR_SRC_SG_SIZE);
+ srclen += sless;
+ soffset += sless;
+ srcsg++;
+ if (sg_dma_len(src) == (soffset + srcskip)) {
+ src = sg_next(src);
+ soffset = 0;
+ srcskip = 0;
+ }
+ }
+ return srclen;
+}
+
static int chcr_sg_ent_in_wr(struct scatterlist *src,
struct scatterlist *dst,
unsigned int minsg,
@@ -662,7 +632,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
{
int srclen = 0, dstlen = 0;
int srcsg = minsg, dstsg = minsg;
- int offset = 0, less;
+ int offset = 0, soffset = 0, less, sless = 0;
if (sg_dma_len(src) == srcskip) {
src = sg_next(src);
@@ -676,7 +646,9 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
while (src && dst &&
space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
- srclen += (sg_dma_len(src) - srcskip);
+ sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
+ CHCR_SRC_SG_SIZE);
+ srclen += sless;
srcsg++;
offset = 0;
while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
@@ -687,15 +659,20 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
dstskip, CHCR_DST_SG_SIZE);
dstlen += less;
offset += less;
- if (offset == sg_dma_len(dst)) {
+ if ((offset + dstskip) == sg_dma_len(dst)) {
dst = sg_next(dst);
offset = 0;
}
dstsg++;
dstskip = 0;
}
- src = sg_next(src);
- srcskip = 0;
+ soffset += sless;
+ if ((soffset + srcskip) == sg_dma_len(src)) {
+ src = sg_next(src);
+ srcskip = 0;
+ soffset = 0;
+ }
+
}
return min(srclen, dstlen);
}
@@ -784,14 +761,14 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
reqctx->dst_ofst);
dst_size = get_space_for_phys_dsgl(nents + 1);
- kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = roundup(ablkctx->enckey_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
CHCR_SRC_SG_SIZE, reqctx->src_ofst);
- temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16)
- * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8);
+ temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) :
+ (sgl_len(nents + MIN_CIPHER_SG) * 8);
transhdr_len += temp;
- transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ transhdr_len = roundup(transhdr_len, 16);
skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
@@ -847,6 +824,13 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
transhdr_len, temp,
ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
reqctx->skb = skb;
+
+ if (reqctx->op && (ablkctx->ciph_mode ==
+ CHCR_SCMD_CIPHER_MODE_AES_CBC))
+ sg_pcopy_to_buffer(wrparam->req->src,
+ sg_nents(wrparam->req->src), wrparam->req->info, 16,
+ reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
+
return skb;
err:
return ERR_PTR(error);
@@ -1070,9 +1054,8 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
ret = chcr_update_tweak(req, iv, 0);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
- 16,
- reqctx->processed - AES_BLOCK_SIZE);
+ /* Updated before sending the last WR */
+ memcpy(iv, req->info, AES_BLOCK_SIZE);
else
memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
}
@@ -1100,11 +1083,8 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
- if (reqctx->op)
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
- 16,
- reqctx->processed - AES_BLOCK_SIZE);
- else
+ /* Already updated for decrypt */
+ if (!reqctx->op)
memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
}
@@ -1143,12 +1123,12 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
}
if (!reqctx->imm) {
bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
- SPACE_LEFT(ablkctx->enckey_len),
+ CIP_SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed;
else
- bytes = ROUND_16(bytes);
+ bytes = rounddown(bytes, 16);
} else {
/*CTR mode counter overfloa*/
bytes = req->nbytes - reqctx->processed;
@@ -1234,7 +1214,7 @@ static int process_cipher(struct ablkcipher_request *req,
CHCR_DST_SG_SIZE, 0);
dnents += 1; // IV
phys_dsgl = get_space_for_phys_dsgl(dnents);
- kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = roundup(ablkctx->enckey_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
reqctx->imm = (transhdr_len + IV + req->nbytes) <=
SGE_MAX_WR_LEN;
@@ -1247,12 +1227,12 @@ static int process_cipher(struct ablkcipher_request *req,
if (!reqctx->imm) {
bytes = chcr_sg_ent_in_wr(req->src, req->dst,
MIN_CIPHER_SG,
- SPACE_LEFT(ablkctx->enckey_len),
+ CIP_SPACE_LEFT(ablkctx->enckey_len),
0, 0);
if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed;
else
- bytes = ROUND_16(bytes);
+ bytes = rounddown(bytes, 16);
} else {
bytes = req->nbytes;
}
@@ -1282,7 +1262,7 @@ static int process_cipher(struct ablkcipher_request *req,
req->src,
req->dst,
req->nbytes,
- req->info,
+ reqctx->iv,
op_type);
goto error;
}
@@ -1503,35 +1483,24 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
struct chcr_wr *chcr_req;
struct ulptx_sgl *ulptx;
- unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
- unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = 0, temp = 0;
- u8 hash_size_in_response = 0;
+ unsigned int nents = 0, transhdr_len;
+ unsigned int temp = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(h_ctx(tfm)->dev);
int error = 0;
- iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
- kctx_len = param->alg_prm.result_size + iopad_alignment;
- if (param->opad_needed)
- kctx_len += param->alg_prm.result_size + iopad_alignment;
-
- if (req_ctx->result)
- hash_size_in_response = digestsize;
- else
- hash_size_in_response = param->alg_prm.result_size;
- transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
- SGE_MAX_WR_LEN;
- nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
+ transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
+ req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
+ param->sg_len) <= SGE_MAX_WR_LEN;
+ nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
+ CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
nents += param->bfr_len ? 1 : 0;
- transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len +
- param->sg_len), 16) * 16) :
- (sgl_len(nents) * 8);
- transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
+ param->sg_len, 16) : (sgl_len(nents) * 8);
+ transhdr_len = roundup(transhdr_len, 16);
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb)
return ERR_PTR(-ENOMEM);
chcr_req = __skb_put_zero(skb, transhdr_len);
@@ -1563,33 +1532,33 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
param->alg_prm.mk_size, 0,
param->opad_needed,
- ((kctx_len +
+ ((param->kctx_len +
sizeof(chcr_req->key_ctx)) >> 4));
chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
- ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
+ ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
DUMMY_BYTES);
if (param->bfr_len != 0) {
- req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
- req_ctx->reqbfr, param->bfr_len,
- DMA_TO_DEVICE);
+ req_ctx->hctx_wr.dma_addr =
+ dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
+ param->bfr_len, DMA_TO_DEVICE);
if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
- req_ctx->dma_addr)) {
+ req_ctx->hctx_wr.dma_addr)) {
error = -ENOMEM;
goto err;
}
- req_ctx->dma_len = param->bfr_len;
+ req_ctx->hctx_wr.dma_len = param->bfr_len;
} else {
- req_ctx->dma_addr = 0;
+ req_ctx->hctx_wr.dma_addr = 0;
}
chcr_add_hash_src_ent(req, ulptx, param);
/* Request upto max wr size */
- temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
- + param->bfr_len) : 0);
+ temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
+ (param->sg_len + param->bfr_len) : 0);
atomic_inc(&adap->chcr_stats.digest_rqst);
- create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
- hash_size_in_response, transhdr_len,
+ create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
+ param->hash_size, transhdr_len,
temp, 0);
- req_ctx->skb = skb;
+ req_ctx->hctx_wr.skb = skb;
return skb;
err:
kfree_skb(skb);
@@ -1608,7 +1577,6 @@ static int chcr_ahash_update(struct ahash_request *req)
int error;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
-
u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
@@ -1625,17 +1593,26 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->reqlen += nbytes;
return 0;
}
+ chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
if (error)
return -ENOMEM;
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+ HASH_SPACE_LEFT(params.kctx_len), 0);
+ if (params.sg_len > req->nbytes)
+ params.sg_len = req->nbytes;
+ params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
+ req_ctx->reqlen;
params.opad_needed = 0;
params.more = 1;
params.last = 0;
- params.sg_len = nbytes - req_ctx->reqlen;
params.bfr_len = req_ctx->reqlen;
params.scmd1 = 0;
- get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- req_ctx->result = 0;
+ req_ctx->hctx_wr.srcsg = req->src;
+
+ params.hash_size = params.alg_prm.result_size;
req_ctx->data_len += params.sg_len + params.bfr_len;
skb = create_hash_wr(req, &params);
if (IS_ERR(skb)) {
@@ -1643,6 +1620,7 @@ static int chcr_ahash_update(struct ahash_request *req)
goto unmap;
}
+ req_ctx->hctx_wr.processed += params.sg_len;
if (remainder) {
/* Swap buffers */
swap(req_ctx->reqbfr, req_ctx->skbfr);
@@ -1680,16 +1658,27 @@ static int chcr_ahash_final(struct ahash_request *req)
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ chcr_init_hctx_per_wr(req_ctx);
u_ctx = ULD_CTX(h_ctx(rtfm));
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
params.opad_needed = 0;
params.sg_len = 0;
+ req_ctx->hctx_wr.isfinal = 1;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- req_ctx->result = 1;
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.opad_needed = 1;
+ params.kctx_len *= 2;
+ } else {
+ params.opad_needed = 0;
+ }
+
+ req_ctx->hctx_wr.result = 1;
params.bfr_len = req_ctx->reqlen;
req_ctx->data_len += params.bfr_len + params.sg_len;
+ req_ctx->hctx_wr.srcsg = req->src;
if (req_ctx->reqlen == 0) {
create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
@@ -1702,10 +1691,11 @@ static int chcr_ahash_final(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
+ params.hash_size = crypto_ahash_digestsize(rtfm);
skb = create_hash_wr(req, &params);
if (IS_ERR(skb))
return PTR_ERR(skb);
-
+ req_ctx->reqlen = 0;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
@@ -1730,37 +1720,59 @@ static int chcr_ahash_finup(struct ahash_request *req)
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
+ chcr_init_hctx_per_wr(req_ctx);
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
+ return -ENOMEM;
- if (is_hmac(crypto_ahash_tfm(rtfm)))
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len *= 2;
params.opad_needed = 1;
- else
+ } else {
params.opad_needed = 0;
+ }
- params.sg_len = req->nbytes;
+ params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+ HASH_SPACE_LEFT(params.kctx_len), 0);
+ if (params.sg_len < req->nbytes) {
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len /= 2;
+ params.opad_needed = 0;
+ }
+ params.last = 0;
+ params.more = 1;
+ params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
+ - req_ctx->reqlen;
+ params.hash_size = params.alg_prm.result_size;
+ params.scmd1 = 0;
+ } else {
+ params.last = 1;
+ params.more = 0;
+ params.sg_len = req->nbytes;
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
+ params.sg_len;
+ }
params.bfr_len = req_ctx->reqlen;
- get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->data_len += params.bfr_len + params.sg_len;
- req_ctx->result = 1;
+ req_ctx->hctx_wr.result = 1;
+ req_ctx->hctx_wr.srcsg = req->src;
if ((req_ctx->reqlen + req->nbytes) == 0) {
create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
params.bfr_len = bs;
- } else {
- params.scmd1 = req_ctx->data_len;
- params.last = 1;
- params.more = 0;
}
- error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
-
skb = create_hash_wr(req, &params);
if (IS_ERR(skb)) {
error = PTR_ERR(skb);
goto unmap;
}
+ req_ctx->reqlen = 0;
+ req_ctx->hctx_wr.processed += params.sg_len;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
@@ -1791,21 +1803,42 @@ static int chcr_ahash_digest(struct ahash_request *req)
return -EBUSY;
}
- if (is_hmac(crypto_ahash_tfm(rtfm)))
- params.opad_needed = 1;
- else
- params.opad_needed = 0;
+ chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
if (error)
return -ENOMEM;
- params.last = 0;
- params.more = 0;
- params.sg_len = req->nbytes;
- params.bfr_len = 0;
- params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
- req_ctx->result = 1;
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len *= 2;
+ params.opad_needed = 1;
+ } else {
+ params.opad_needed = 0;
+ }
+ params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+ HASH_SPACE_LEFT(params.kctx_len), 0);
+ if (params.sg_len < req->nbytes) {
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len /= 2;
+ params.opad_needed = 0;
+ }
+ params.last = 0;
+ params.more = 1;
+ params.scmd1 = 0;
+ params.sg_len = rounddown(params.sg_len, bs);
+ params.hash_size = params.alg_prm.result_size;
+ } else {
+ params.sg_len = req->nbytes;
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ params.last = 1;
+ params.more = 0;
+ params.scmd1 = req->nbytes + req_ctx->data_len;
+
+ }
+ params.bfr_len = 0;
+ req_ctx->hctx_wr.result = 1;
+ req_ctx->hctx_wr.srcsg = req->src;
req_ctx->data_len += params.bfr_len + params.sg_len;
if (req->nbytes == 0) {
@@ -1819,6 +1852,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
error = PTR_ERR(skb);
goto unmap;
}
+ req_ctx->hctx_wr.processed += params.sg_len;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
@@ -1828,6 +1862,151 @@ unmap:
return error;
}
+static int chcr_ahash_continue(struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+ struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = NULL;
+ struct sk_buff *skb;
+ struct hash_wr_param params;
+ u8 bs;
+ int error;
+
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ u_ctx = ULD_CTX(h_ctx(rtfm));
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ h_ctx(rtfm)->tx_qidx))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len *= 2;
+ params.opad_needed = 1;
+ } else {
+ params.opad_needed = 0;
+ }
+ params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
+ HASH_SPACE_LEFT(params.kctx_len),
+ hctx_wr->src_ofst);
+ if ((params.sg_len + hctx_wr->processed) > req->nbytes)
+ params.sg_len = req->nbytes - hctx_wr->processed;
+ if (!hctx_wr->result ||
+ ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
+ if (is_hmac(crypto_ahash_tfm(rtfm))) {
+ params.kctx_len /= 2;
+ params.opad_needed = 0;
+ }
+ params.last = 0;
+ params.more = 1;
+ params.sg_len = rounddown(params.sg_len, bs);
+ params.hash_size = params.alg_prm.result_size;
+ params.scmd1 = 0;
+ } else {
+ params.last = 1;
+ params.more = 0;
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ params.scmd1 = reqctx->data_len + params.sg_len;
+ }
+ params.bfr_len = 0;
+ reqctx->data_len += params.sg_len;
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto err;
+ }
+ hctx_wr->processed += params.sg_len;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ chcr_send_wr(skb);
+ return 0;
+err:
+ return error;
+}
+
+static inline void chcr_handle_ahash_resp(struct ahash_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+ struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+ int digestsize, updated_digestsize;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+
+ if (input == NULL)
+ goto out;
+ digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+ updated_digestsize = digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ updated_digestsize = SHA256_DIGEST_SIZE;
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ updated_digestsize = SHA512_DIGEST_SIZE;
+
+ if (hctx_wr->dma_addr) {
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
+ hctx_wr->dma_len, DMA_TO_DEVICE);
+ hctx_wr->dma_addr = 0;
+ }
+ if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
+ req->nbytes)) {
+ if (hctx_wr->result == 1) {
+ hctx_wr->result = 0;
+ memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+ digestsize);
+ } else {
+ memcpy(reqctx->partial_hash,
+ input + sizeof(struct cpl_fw6_pld),
+ updated_digestsize);
+
+ }
+ goto unmap;
+ }
+ memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+ updated_digestsize);
+
+ err = chcr_ahash_continue(req);
+ if (err)
+ goto unmap;
+ return;
+unmap:
+ if (hctx_wr->is_sg_map)
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+
+
+out:
+ req->base.complete(&req->base, err);
+}
+
+/*
+ * chcr_handle_resp - Unmap the DMA buffers associated with the request
+ * @req: crypto request
+ */
+int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
+ int err)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+ struct adapter *adap = padap(ctx->dev);
+
+ switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ chcr_handle_aead_resp(aead_request_cast(req), input, err);
+ break;
+
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+ input, err);
+ break;
+
+ case CRYPTO_ALG_TYPE_AHASH:
+ chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
+ }
+ atomic_inc(&adap->chcr_stats.complete);
+ return err;
+}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
@@ -1835,11 +2014,10 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
- state->is_sg_map = 0;
- state->result = 0;
memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
+ chcr_init_hctx_per_wr(state);
return 0;
}
@@ -1852,11 +2030,10 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
req_ctx->data_len = state->data_len;
req_ctx->reqbfr = req_ctx->bfr1;
req_ctx->skbfr = req_ctx->bfr2;
- req_ctx->is_sg_map = 0;
- req_ctx->result = 0;
memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
+ chcr_init_hctx_per_wr(req_ctx);
return 0;
}
@@ -1953,10 +2130,8 @@ static int chcr_sha_init(struct ahash_request *areq)
req_ctx->reqlen = 0;
req_ctx->reqbfr = req_ctx->bfr1;
req_ctx->skbfr = req_ctx->bfr2;
- req_ctx->skb = NULL;
- req_ctx->result = 0;
- req_ctx->is_sg_map = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
+
return 0;
}
@@ -2124,11 +2299,11 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16)
- * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents
+ temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
+ : (sgl_len(reqctx->src_nents + reqctx->aad_nents
+ MIN_GCM_SG) * 8);
transhdr_len += temp;
- transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
transhdr_len, op_type)) {
@@ -2187,9 +2362,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
aeadctx->enckey_len);
- memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
- 4), actx->h_iopad, kctx_len -
- (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
@@ -2398,22 +2572,26 @@ void chcr_add_hash_src_ent(struct ahash_request *req,
struct ulptx_walk ulp_walk;
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
- if (reqctx->imm) {
+ if (reqctx->hctx_wr.imm) {
u8 *buf = (u8 *)ulptx;
if (param->bfr_len) {
memcpy(buf, reqctx->reqbfr, param->bfr_len);
buf += param->bfr_len;
}
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, param->sg_len, 0);
+
+ sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
+ sg_nents(reqctx->hctx_wr.srcsg), buf,
+ param->sg_len, 0);
} else {
ulptx_walk_init(&ulp_walk, ulptx);
if (param->bfr_len)
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
- &reqctx->dma_addr);
- ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
- 0);
+ &reqctx->hctx_wr.dma_addr);
+ ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
+ param->sg_len, reqctx->hctx_wr.src_ofst);
+ reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
+ reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
ulptx_walk_end(&ulp_walk);
}
}
@@ -2430,7 +2608,7 @@ int chcr_hash_dma_map(struct device *dev,
DMA_TO_DEVICE);
if (!error)
return -ENOMEM;
- req_ctx->is_sg_map = 1;
+ req_ctx->hctx_wr.is_sg_map = 1;
return 0;
}
@@ -2444,7 +2622,7 @@ void chcr_hash_dma_unmap(struct device *dev,
dma_unmap_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
- req_ctx->is_sg_map = 0;
+ req_ctx->hctx_wr.is_sg_map = 0;
}
@@ -2636,10 +2814,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
0, dst_size);
}
-int aead_ccm_validate_input(unsigned short op_type,
- struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
- unsigned int sub_type)
+static int aead_ccm_validate_input(unsigned short op_type,
+ struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type)
{
if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
if (crypto_ccm_check_iv(req->iv)) {
@@ -2696,16 +2874,16 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
CHCR_DST_SG_SIZE, req->assoclen);
dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
- kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+ kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
reqctx->b0_len) <= SGE_MAX_WR_LEN;
- temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen +
- reqctx->b0_len), 16) * 16) :
+ temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
+ reqctx->b0_len, 16) :
(sgl_len(reqctx->src_nents + reqctx->aad_nents +
MIN_CCM_SG) * 8);
transhdr_len += temp;
- transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
reqctx->b0_len, transhdr_len, op_type)) {
@@ -2727,8 +2905,8 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
- memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
- 16), aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ aeadctx->key, aeadctx->enckey_len);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
@@ -2798,16 +2976,15 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
CHCR_DST_SG_SIZE, req->assoclen);
dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
- kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
- AEAD_H_SIZE;
+ kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV +
- req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents +
- reqctx->aad_nents + MIN_GCM_SG) * 8);
+ temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
+ (sgl_len(reqctx->src_nents +
+ reqctx->aad_nents + MIN_GCM_SG) * 8);
transhdr_len += temp;
- transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
@@ -2846,8 +3023,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
- memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
- 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
/* prepare a 16 byte iv */
/* S A L T | IV | 0x00000001 */
@@ -3067,11 +3244,10 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
unsigned char ck_size, mk_size;
int key_ctx_size = 0;
- key_ctx_size = sizeof(struct _key_ctx) +
- ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
+ key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
if (keylen == AES_KEYSIZE_128) {
- mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
} else if (keylen == AES_KEYSIZE_192) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
@@ -3178,10 +3354,9 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
memcpy(aeadctx->key, key, keylen);
aeadctx->enckey_len = keylen;
- key_ctx_size = sizeof(struct _key_ctx) +
- ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
AEAD_H_SIZE;
- aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
CHCR_KEYCTX_MAC_KEY_SIZE_128,
0, 0,
key_ctx_size >> 4);
@@ -3281,6 +3456,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
if (IS_ERR(base_hash)) {
pr_err("chcr : Base driver cannot be loaded\n");
aeadctx->enckey_len = 0;
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
{
@@ -3325,17 +3501,19 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
chcr_change_order(actx->h_iopad, param.result_size);
chcr_change_order(o_ptr, param.result_size);
key_ctx_len = sizeof(struct _key_ctx) +
- ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+ roundup(keys.enckeylen, 16) +
(param.result_size + align) * 2;
aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
0, 1, key_ctx_len >> 4);
actx->auth_mode = param.auth_mode;
chcr_free_shash(base_hash);
+ memzero_explicit(&keys, sizeof(keys));
return 0;
}
out:
aeadctx->enckey_len = 0;
+ memzero_explicit(&keys, sizeof(keys));
if (!IS_ERR(base_hash))
chcr_free_shash(base_hash);
return -EINVAL;
@@ -3393,15 +3571,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
aeadctx->enckey_len << 3);
}
- key_ctx_len = sizeof(struct _key_ctx)
- + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+ key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
0, key_ctx_len >> 4);
actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+ memzero_explicit(&keys, sizeof(keys));
return 0;
out:
aeadctx->enckey_len = 0;
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index f263cd42a84f..dba3dff1e209 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -258,15 +258,16 @@
#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
ULP_TX_SC_MORE_V((immdatalen)))
#define MAX_NK 8
-#define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0)
#define MAX_DSGL_ENT 32
#define MIN_CIPHER_SG 1 /* IV */
#define MIN_AUTH_SG 1 /* IV */
#define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/
#define MIN_CCM_SG 2 /*IV+B0*/
-#define SPACE_LEFT(len) \
- ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
+#define CIP_SPACE_LEFT(len) \
+ ((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len)))
+#define HASH_SPACE_LEFT(len) \
+ ((SGE_MAX_WR_LEN - HASH_WR_MIN_LEN - (len)))
struct algo_param {
unsigned int auth_mode;
@@ -275,12 +276,14 @@ struct algo_param {
};
struct hash_wr_param {
+ struct algo_param alg_prm;
unsigned int opad_needed;
unsigned int more;
unsigned int last;
- struct algo_param alg_prm;
+ unsigned int kctx_len;
unsigned int sg_len;
unsigned int bfr_len;
+ unsigned int hash_size;
u64 scmd1;
};
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 77056a90c8e1..1a20424e18c6 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -54,10 +54,14 @@
#define MAC_ERROR_BIT 0
#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
#define MAX_SALT 4
-#define WR_MIN_LEN (sizeof(struct chcr_wr) + \
+#define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \
sizeof(struct cpl_rx_phys_dsgl) + \
sizeof(struct ulptx_sgl))
+#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
+ DUMMY_BYTES + \
+ sizeof(struct ulptx_sgl))
+
#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
struct uld_ctx;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 7daf0a17a7d2..c8e8972af283 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -258,21 +258,32 @@ struct chcr_context {
struct __crypto_ctx crypto_ctx[0];
};
-struct chcr_ahash_req_ctx {
+struct chcr_hctx_per_wr {
+ struct scatterlist *srcsg;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 dma_len;
+ unsigned int src_ofst;
+ unsigned int processed;
u32 result;
- u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
- u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 is_sg_map;
+ u8 imm;
+ /* Final callback called. Driver cannot rely on nbytes to decide
+ * the final call.
+ */
+ u8 isfinal;
+};
+
+struct chcr_ahash_req_ctx {
+ struct chcr_hctx_per_wr hctx_wr;
u8 *reqbfr;
u8 *skbfr;
- dma_addr_t dma_addr;
- u32 dma_len;
+ /* SKB which is being sent to the hardware for processing */
+ u64 data_len; /* Data len till time */
u8 reqlen;
- u8 imm;
- u8 is_sg_map;
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
- u64 data_len; /* Data len till time */
- /* SKB which is being sent to the hardware for processing */
- struct sk_buff *skb;
+ u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+ u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
struct chcr_blkcipher_req_ctx {
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index db1e241104ed..8e0aa3f175c9 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -360,8 +360,7 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
cpl = (struct cpl_tx_pkt_core *)pos;
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
TXPKT_PF_V(adap->pf);
if (skb_vlan_tag_present(skb)) {
@@ -475,7 +474,7 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
/* Sub-command */
- wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
sizeof(wr->req.key_ctx) +
kctx_len +
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 225e74a7f724..d4a81be0d7d2 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -235,7 +235,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
- val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
+ val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
writel(val,
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
@@ -332,7 +332,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
- val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
+ val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
/* FIXME: instability issues can occur for EIP97, but disabling it impacts
* performance.
*/
@@ -354,7 +354,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
- val |= EIP197_ALG_SHA2;
+ val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
/* Command Descriptor Rings prepare */
@@ -432,20 +432,18 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
}
/* Called with ring's lock taken */
-static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
- int ring, int reqs)
+static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+ int ring)
{
- int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+ int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
if (!coal)
- return 0;
+ return;
/* Configure when we want an interrupt */
writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
-
- return coal;
}
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
@@ -490,6 +488,15 @@ handle_req:
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
+ /* In case the send() helper did not issue any command to push
+ * to the engine because the input data was cached, continue to
+ * dequeue other requests as this is valid and not an error.
+ */
+ if (!commands && !results) {
+ kfree(request);
+ continue;
+ }
+
spin_lock_bh(&priv->ring[ring].egress_lock);
list_add_tail(&request->list, &priv->ring[ring].list);
spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -512,14 +519,13 @@ finalize:
spin_lock_bh(&priv->ring[ring].egress_lock);
+ priv->ring[ring].requests += nreq;
+
if (!priv->ring[ring].busy) {
- nreq -= safexcel_try_push_requests(priv, ring, nreq);
- if (nreq)
- priv->ring[ring].busy = true;
+ safexcel_try_push_requests(priv, ring);
+ priv->ring[ring].busy = true;
}
- priv->ring[ring].requests_left += nreq;
-
spin_unlock_bh(&priv->ring[ring].egress_lock);
/* let the RDR know we have pending descriptors */
@@ -531,25 +537,6 @@ finalize:
EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
- struct crypto_async_request *req,
- int result_sz)
-{
- struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
-
- if (ctx->result_dma)
- dma_unmap_single(priv->dev, ctx->result_dma, result_sz,
- DMA_FROM_DEVICE);
-
- if (ctx->cache) {
- dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
- DMA_TO_DEVICE);
- kfree(ctx->cache);
- ctx->cache = NULL;
- ctx->cache_sz = 0;
- }
-}
-
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
struct safexcel_command_desc *cdesc;
@@ -623,7 +610,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
{
struct safexcel_request *sreq;
struct safexcel_context *ctx;
- int ret, i, nreq, ndesc, tot_descs, done;
+ int ret, i, nreq, ndesc, tot_descs, handled = 0;
bool should_complete;
handle_results:
@@ -659,6 +646,7 @@ handle_results:
kfree(sreq);
tot_descs += ndesc;
+ handled++;
}
acknowledge:
@@ -677,11 +665,10 @@ acknowledge:
requests_left:
spin_lock_bh(&priv->ring[ring].egress_lock);
- done = safexcel_try_push_requests(priv, ring,
- priv->ring[ring].requests_left);
+ priv->ring[ring].requests -= handled;
+ safexcel_try_push_requests(priv, ring);
- priv->ring[ring].requests_left -= done;
- if (!done && !priv->ring[ring].requests_left)
+ if (!priv->ring[ring].requests)
priv->ring[ring].busy = false;
spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -781,6 +768,8 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_sha224,
&safexcel_alg_sha256,
&safexcel_alg_hmac_sha1,
+ &safexcel_alg_hmac_sha224,
+ &safexcel_alg_hmac_sha256,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -894,29 +883,44 @@ static int safexcel_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
}
- priv->clk = of_clk_get(dev->of_node, 0);
- if (!IS_ERR(priv->clk)) {
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ ret = PTR_ERR_OR_ZERO(priv->clk);
+ /* The clock isn't mandatory */
+ if (ret != -ENOENT) {
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "unable to enable clk (%d)\n", ret);
return ret;
}
- } else {
- /* The clock isn't mandatory */
- if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ }
+
+ priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
+ ret = PTR_ERR_OR_ZERO(priv->reg_clk);
+ /* The clock isn't mandatory */
+ if (ret != -ENOENT) {
+ if (ret)
+ goto err_core_clk;
+
+ ret = clk_prepare_enable(priv->reg_clk);
+ if (ret) {
+ dev_err(dev, "unable to enable reg clk (%d)\n", ret);
+ goto err_core_clk;
+ }
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
- goto err_clk;
+ goto err_reg_clk;
priv->context_pool = dmam_pool_create("safexcel-context", dev,
sizeof(struct safexcel_context_record),
1, 0);
if (!priv->context_pool) {
ret = -ENOMEM;
- goto err_clk;
+ goto err_reg_clk;
}
safexcel_configure(priv);
@@ -931,12 +935,12 @@ static int safexcel_probe(struct platform_device *pdev)
&priv->ring[i].cdr,
&priv->ring[i].rdr);
if (ret)
- goto err_clk;
+ goto err_reg_clk;
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
if (!ring_irq) {
ret = -ENOMEM;
- goto err_clk;
+ goto err_reg_clk;
}
ring_irq->priv = priv;
@@ -948,7 +952,7 @@ static int safexcel_probe(struct platform_device *pdev)
ring_irq);
if (irq < 0) {
ret = irq;
- goto err_clk;
+ goto err_reg_clk;
}
priv->ring[i].work_data.priv = priv;
@@ -959,10 +963,10 @@ static int safexcel_probe(struct platform_device *pdev)
priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
if (!priv->ring[i].workqueue) {
ret = -ENOMEM;
- goto err_clk;
+ goto err_reg_clk;
}
- priv->ring[i].requests_left = 0;
+ priv->ring[i].requests = 0;
priv->ring[i].busy = false;
crypto_init_queue(&priv->ring[i].queue,
@@ -980,18 +984,20 @@ static int safexcel_probe(struct platform_device *pdev)
ret = safexcel_hw_init(priv);
if (ret) {
dev_err(dev, "EIP h/w init failed (%d)\n", ret);
- goto err_clk;
+ goto err_reg_clk;
}
ret = safexcel_register_algorithms(priv);
if (ret) {
dev_err(dev, "Failed to register algorithms (%d)\n", ret);
- goto err_clk;
+ goto err_reg_clk;
}
return 0;
-err_clk:
+err_reg_clk:
+ clk_disable_unprepare(priv->reg_clk);
+err_core_clk:
clk_disable_unprepare(priv->clk);
return ret;
}
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 4e219c21608b..b470a849721f 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -135,7 +135,7 @@
/* EIP197_HIA_xDR_DMA_CFG */
#define EIP197_HIA_xDR_WR_RES_BUF BIT(22)
-#define EIP197_HIA_xDR_WR_CTRL_BUG BIT(23)
+#define EIP197_HIA_xDR_WR_CTRL_BUF BIT(23)
#define EIP197_HIA_xDR_WR_OWN_BUF BIT(24)
#define EIP197_HIA_xDR_CFG_WR_CACHE(n) (((n) & 0x7) << 25)
#define EIP197_HIA_xDR_CFG_RD_CACHE(n) (((n) & 0x7) << 29)
@@ -179,7 +179,7 @@
#define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n) ((n) << 0)
#define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n) (((n) & 0x7) << 4)
#define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n) ((n) << 8)
-#define EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE GENMASK(15, 14)
+#define EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE GENMASK(15, 14)
#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16)
#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20)
#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24)
@@ -525,6 +525,7 @@ struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
struct clk *clk;
+ struct clk *reg_clk;
struct safexcel_config config;
enum safexcel_eip_version version;
@@ -551,10 +552,8 @@ struct safexcel_crypto_priv {
struct crypto_queue queue;
spinlock_t queue_lock;
- /* Number of requests in the engine that needs the threshold
- * interrupt to be set up.
- */
- int requests_left;
+ /* Number of requests in the engine. */
+ int requests;
/* The ring is currently handling at least one request */
bool busy;
@@ -580,12 +579,6 @@ struct safexcel_context {
int ring;
bool needs_inv;
bool exit_inv;
-
- /* Used for ahash requests */
- dma_addr_t result_dma;
- void *cache;
- dma_addr_t cache_dma;
- unsigned int cache_sz;
};
/*
@@ -609,9 +602,6 @@ struct safexcel_inv_result {
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
- struct crypto_async_request *req,
- int result_sz);
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
@@ -643,5 +633,7 @@ extern struct safexcel_alg_template safexcel_alg_sha1;
extern struct safexcel_alg_template safexcel_alg_sha224;
extern struct safexcel_alg_template safexcel_alg_sha256;
extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 63a8768ed2ae..bafb60505fab 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -58,7 +58,8 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
token[0].packet_length = length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
+ token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
+ EIP197_TOKEN_STAT_LAST_HASH;
token[0].instructions = EIP197_TOKEN_INS_LAST |
EIP197_TOKEN_INS_TYPE_CRYTO |
EIP197_TOKEN_INS_TYPE_OUTPUT;
@@ -456,7 +457,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
if (result.error) {
dev_warn(priv->dev,
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 122a2a58e98f..317b9e480312 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -21,10 +21,9 @@ struct safexcel_ahash_ctx {
struct safexcel_crypto_priv *priv;
u32 alg;
- u32 digest;
- u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
+ u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
@@ -34,6 +33,9 @@ struct safexcel_ahash_req {
bool needs_inv;
int nents;
+ dma_addr_t result_dma;
+
+ u32 digest;
u8 state_sz; /* expected sate size, only set once */
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
@@ -42,6 +44,9 @@ struct safexcel_ahash_req {
u64 processed;
u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ dma_addr_t cache_dma;
+ unsigned int cache_sz;
+
u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};
@@ -49,6 +54,8 @@ struct safexcel_ahash_export_state {
u64 len;
u64 processed;
+ u32 digest;
+
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
u8 cache[SHA256_BLOCK_SIZE];
};
@@ -82,9 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
cdesc->control_data.control0 |= ctx->alg;
- cdesc->control_data.control0 |= ctx->digest;
+ cdesc->control_data.control0 |= req->digest;
- if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
+ if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
if (req->processed) {
if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
@@ -112,12 +119,12 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
if (req->finish)
ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
}
- } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
- cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);
+ } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
- memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
- memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
- ctx->opad, digestsize);
+ memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
+ memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
+ ctx->opad, req->state_sz);
}
}
@@ -149,16 +156,26 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
safexcel_complete(priv, ring);
spin_unlock_bh(&priv->ring[ring].egress_lock);
- if (sreq->finish)
- memcpy(areq->result, sreq->state,
- crypto_ahash_digestsize(ahash));
-
if (sreq->nents) {
dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
sreq->nents = 0;
}
- safexcel_free_context(priv, async, sreq->state_sz);
+ if (sreq->result_dma) {
+ dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
+ DMA_FROM_DEVICE);
+ sreq->result_dma = 0;
+ }
+
+ if (sreq->cache_dma) {
+ dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
+ DMA_TO_DEVICE);
+ sreq->cache_dma = 0;
+ }
+
+ if (sreq->finish)
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
cache_len = sreq->len - sreq->processed;
if (cache_len)
@@ -184,7 +201,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
queued = len = req->len - req->processed;
- if (queued < crypto_ahash_blocksize(ahash))
+ if (queued <= crypto_ahash_blocksize(ahash))
cache_len = queued;
else
cache_len = queued - areq->nbytes;
@@ -198,7 +215,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
/* If this is not the last request and the queued data
* is a multiple of a block, cache the last one for now.
*/
- extra = queued - crypto_ahash_blocksize(ahash);
+ extra = crypto_ahash_blocksize(ahash);
if (extra) {
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
@@ -220,24 +237,17 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
/* Add a command descriptor for the cached data, if any */
if (cache_len) {
- ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
- if (!ctx->base.cache) {
- ret = -ENOMEM;
- goto unlock;
- }
- memcpy(ctx->base.cache, req->cache, cache_len);
- ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
- cache_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
- ret = -EINVAL;
- goto free_cache;
+ req->cache_dma = dma_map_single(priv->dev, req->cache,
+ cache_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, req->cache_dma)) {
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
+ return -EINVAL;
}
- ctx->base.cache_sz = cache_len;
+ req->cache_sz = cache_len;
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
(cache_len == len),
- ctx->base.cache_dma,
- cache_len, len,
+ req->cache_dma, cache_len, len,
ctx->base.ctxr_dma);
if (IS_ERR(first_cdesc)) {
ret = PTR_ERR(first_cdesc);
@@ -271,7 +281,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
sglen, len, ctx->base.ctxr_dma);
if (IS_ERR(cdesc)) {
ret = PTR_ERR(cdesc);
- goto cdesc_rollback;
+ goto unmap_sg;
}
n_cdesc++;
@@ -291,19 +301,19 @@ send_command:
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
- ctx->base.result_dma = dma_map_single(priv->dev, req->state,
- req->state_sz, DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
+ req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, req->result_dma)) {
ret = -EINVAL;
- goto cdesc_rollback;
+ goto unmap_sg;
}
/* Add a result descriptor */
- rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
+ rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
req->state_sz);
if (IS_ERR(rdesc)) {
ret = PTR_ERR(rdesc);
- goto cdesc_rollback;
+ goto unmap_result;
}
spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -315,20 +325,21 @@ send_command:
*results = 1;
return 0;
+unmap_result:
+ dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
+ DMA_FROM_DEVICE);
+unmap_sg:
+ dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
- if (ctx->base.cache_dma) {
- dma_unmap_single(priv->dev, ctx->base.cache_dma,
- ctx->base.cache_sz, DMA_TO_DEVICE);
- ctx->base.cache_sz = 0;
+ if (req->cache_dma) {
+ dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
+ DMA_TO_DEVICE);
+ req->cache_sz = 0;
}
-free_cache:
- kfree(ctx->base.cache);
- ctx->base.cache = NULL;
-unlock:
spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
@@ -493,7 +504,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
if (result.error) {
dev_warn(priv->dev, "hash: completion error (%d)\n",
@@ -550,7 +561,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
if (ctx->base.ctxr) {
if (priv->version == EIP197 &&
!ctx->base.needs_inv && req->processed &&
- ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+ req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
/* We're still setting needs_inv here, even though it is
* cleared right away, because the needs_inv flag can be
* set in other functions and we want to keep the same
@@ -585,7 +596,6 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
static int safexcel_ahash_update(struct ahash_request *areq)
{
- struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -601,7 +611,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
* We're not doing partial updates when performing an hmac request.
* Everything will be handled by the final() call.
*/
- if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
return 0;
if (req->hmac)
@@ -660,6 +670,8 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->len = req->len;
export->processed = req->processed;
+ export->digest = req->digest;
+
memcpy(export->state, req->state, req->state_sz);
memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
@@ -680,6 +692,8 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
req->len = export->len;
req->processed = export->processed;
+ req->digest = export->digest;
+
memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
memcpy(req->state, export->state, req->state_sz);
@@ -716,7 +730,7 @@ static int safexcel_sha1_init(struct ahash_request *areq)
req->state[4] = SHA1_H4;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
- ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA1_DIGEST_SIZE;
return 0;
@@ -783,10 +797,10 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
- struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
safexcel_sha1_init(areq);
- ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
return 0;
}
@@ -839,7 +853,7 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
init_completion(&result.completion);
ret = crypto_ahash_digest(areq);
- if (ret == -EINPROGRESS) {
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
wait_for_completion_interruptible(&result.completion);
ret = result.error;
}
@@ -949,20 +963,21 @@ free_ahash:
return ret;
}
-static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
+static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen, const char *alg,
+ unsigned int state_sz)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret, i;
- ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
+ ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
if (ret)
return ret;
if (priv->version == EIP197 && ctx->base.ctxr) {
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ for (i = 0; i < state_sz / sizeof(u32); i++) {
if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
ctx->base.needs_inv = true;
@@ -971,12 +986,19 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
}
}
- memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
- memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+ memcpy(ctx->ipad, &istate.state, state_sz);
+ memcpy(ctx->opad, &ostate.state, state_sz);
return 0;
}
+static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
+ SHA1_DIGEST_SIZE);
+}
+
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
.alg.ahash = {
@@ -1024,7 +1046,7 @@ static int safexcel_sha256_init(struct ahash_request *areq)
req->state[7] = SHA256_H7;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
- ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
return 0;
@@ -1086,7 +1108,7 @@ static int safexcel_sha224_init(struct ahash_request *areq)
req->state[7] = SHA224_H7;
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
- ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
req->state_sz = SHA256_DIGEST_SIZE;
return 0;
@@ -1130,3 +1152,115 @@ struct safexcel_alg_template safexcel_alg_sha224 = {
},
},
};
+
+static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
+ SHA256_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha224_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha224_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha224_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha224_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha224_digest,
+ .setkey = safexcel_hmac_sha224_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "safexcel-hmac-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
+ SHA256_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha256_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha256_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha256_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha256_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha256_digest,
+ .setkey = safexcel_hmac_sha256_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "safexcel-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 717a26607bdb..27f7dad2d45d 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1167,9 +1167,11 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->authkey_len = keys.authkeylen;
ctx->enckey_len = keys.enckeylen;
+ memzero_explicit(&keys, sizeof(keys));
return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index aca2373fa1de..f81fa4a3e66b 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -25,7 +25,6 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 764be3e6933c..a10c418d4e5c 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -759,6 +759,16 @@ static int dcp_sha_digest(struct ahash_request *req)
return dcp_sha_finup(req);
}
+static int dcp_sha_noimport(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
+static int dcp_sha_noexport(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -829,6 +839,8 @@ static struct ahash_alg dcp_sha1_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
+ .import = dcp_sha_noimport,
+ .export = dcp_sha_noexport,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.base = {
@@ -853,6 +865,8 @@ static struct ahash_alg dcp_sha256_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
+ .import = dcp_sha_noimport,
+ .export = dcp_sha_noexport,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.base = {
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 662e709812cc..80e9c842aad4 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -359,6 +359,16 @@ static int n2_hash_async_finup(struct ahash_request *req)
return crypto_ahash_finup(&rctx->fallback_req);
}
+static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
+static int n2_hash_async_noexport(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
@@ -1467,6 +1477,8 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
ahash->final = n2_hash_async_final;
ahash->finup = n2_hash_async_finup;
ahash->digest = n2_hash_async_digest;
+ ahash->export = n2_hash_async_noexport;
+ ahash->import = n2_hash_async_noimport;
halg = &ahash->halg;
halg->digestsize = tmpl->digest_size;
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index bf52cd1d7fca..66869976cfa2 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -1105,10 +1105,9 @@ static int __init nx842_pseries_init(void)
RCU_INIT_POINTER(devdata, NULL);
new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
- if (!new_devdata) {
- pr_err("Could not allocate memory for device data\n");
+ if (!new_devdata)
return -ENOMEM;
- }
+
RCU_INIT_POINTER(devdata, new_devdata);
ret = vio_register_driver(&nx842_vio_driver);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index fbec0a2e76dd..9019f6b67986 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -47,6 +47,8 @@
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);
+static int aes_fallback_sz = 200;
+
#ifdef DEBUG
#define omap_aes_read(dd, offset) \
({ \
@@ -388,7 +390,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
pr_debug("err: %d\n", err);
- crypto_finalize_cipher_request(dd->engine, req, err);
+ crypto_finalize_ablkcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -408,14 +410,15 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
struct ablkcipher_request *req)
{
if (req)
- return crypto_transfer_cipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
return 0;
}
static int omap_aes_prepare_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ void *areq)
{
+ struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
@@ -468,8 +471,9 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
}
static int omap_aes_crypt_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ void *areq)
{
+ struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
@@ -517,7 +521,7 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
- if (req->nbytes < 200) {
+ if (req->nbytes < aes_fallback_sz) {
SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
skcipher_request_set_tfm(subreq, ctx->fallback);
@@ -601,6 +605,11 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
return omap_aes_crypt(req, FLAGS_CTR);
}
+static int omap_aes_prepare_req(struct crypto_engine *engine,
+ void *req);
+static int omap_aes_crypt_req(struct crypto_engine *engine,
+ void *req);
+
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
@@ -616,6 +625,10 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
+ ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ ctx->enginectx.op.do_one_request = omap_aes_crypt_req;
+
return 0;
}
@@ -1029,6 +1042,87 @@ err:
return err;
}
+static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", aes_fallback_sz);
+}
+
+static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t status;
+ long value;
+
+ status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
+
+ /* HW accelerator only works with buffers > 9 */
+ if (value < 9) {
+ dev_err(dev, "minimum fallback size 9\n");
+ return -EINVAL;
+ }
+
+ aes_fallback_sz = value;
+
+ return size;
+}
+
+static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct omap_aes_dev *dd = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
+}
+
+static ssize_t queue_len_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct omap_aes_dev *dd;
+ ssize_t status;
+ long value;
+ unsigned long flags;
+
+ status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
+
+ if (value < 1)
+ return -EINVAL;
+
+ /*
+ * Changing the queue size on the fly is safe: if the new size is
+ * smaller than the current size, the queue simply stops accepting
+ * new entries until it has shrunk enough.
+ */
+ spin_lock_bh(&list_lock);
+ list_for_each_entry(dd, &dev_list, list) {
+ spin_lock_irqsave(&dd->lock, flags);
+ dd->engine->queue.max_qlen = value;
+ dd->aead_queue.base.max_qlen = value;
+ spin_unlock_irqrestore(&dd->lock, flags);
+ }
+ spin_unlock_bh(&list_lock);
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(queue_len);
+static DEVICE_ATTR_RW(fallback);
+
+static struct attribute *omap_aes_attrs[] = {
+ &dev_attr_queue_len.attr,
+ &dev_attr_fallback.attr,
+ NULL,
+};
+
+static struct attribute_group omap_aes_attr_group = {
+ .attrs = omap_aes_attrs,
+};
+
static int omap_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1119,8 +1213,6 @@ static int omap_aes_probe(struct platform_device *pdev)
goto err_engine;
}
- dd->engine->prepare_cipher_request = omap_aes_prepare_req;
- dd->engine->cipher_one_request = omap_aes_crypt_req;
err = crypto_engine_start(dd->engine);
if (err)
goto err_engine;
@@ -1159,6 +1251,12 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
+ err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
+ if (err) {
+ dev_err(dev, "could not create sysfs device attrs\n");
+ goto err_aead_algs;
+ }
+
return 0;
err_aead_algs:
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
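The omap-aes hunks above (like the omap-des, stm32-cryp and stm32-hash hunks further down) move from the engine-wide prepare_cipher_request/cipher_one_request hooks to per-transform callbacks carried in struct crypto_engine_ctx. A minimal sketch of that binding pattern, with hypothetical foo_* names standing in for the drivers' own symbols:

#include <linux/crypto.h>
#include <crypto/engine.h>

struct foo_ctx {
	/* Placed first, as in the context structs above, so the engine can
	 * reach it through crypto_tfm_ctx(). */
	struct crypto_engine_ctx enginectx;
	/* ... driver-specific state ... */
};

static int foo_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct ablkcipher_request *req =
			container_of(areq, struct ablkcipher_request, base);

	/* Program the hardware for this request; the completion path later
	 * calls crypto_finalize_ablkcipher_request(). */
	return 0;
}

static int foo_cra_init(struct crypto_tfm *tfm)
{
	struct foo_ctx *ctx = crypto_tfm_ctx(tfm);

	/* The engine now finds its callbacks through the tfm context. */
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = foo_do_one_request;

	return 0;
}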
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 8906342e2b9a..fc3b46a85809 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -13,6 +13,8 @@
#ifndef __OMAP_AES_H__
#define __OMAP_AES_H__
+#include <crypto/engine.h>
+
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
@@ -95,6 +97,7 @@ struct omap_aes_gcm_result {
};
struct omap_aes_ctx {
+ struct crypto_engine_ctx enginectx;
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index 23e37779317e..2c42e4b4a6e9 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -104,6 +104,10 @@ static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
return OMAP_CRYPTO_NOT_ALIGNED;
if (!IS_ALIGNED(sg->length, bs))
return OMAP_CRYPTO_NOT_ALIGNED;
+#ifdef CONFIG_ZONE_DMA
+ if (page_zonenum(sg_page(sg)) != ZONE_DMA)
+ return OMAP_CRYPTO_NOT_ALIGNED;
+#endif
len += sg->length;
sg = sg_next(sg);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index ebc5c0f11f03..eb95b0d7f184 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -86,6 +86,7 @@
#define FLAGS_OUT_DATA_ST_SHIFT 10
struct omap_des_ctx {
+ struct crypto_engine_ctx enginectx;
struct omap_des_dev *dd;
int keylen;
@@ -498,7 +499,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
pr_debug("err: %d\n", err);
- crypto_finalize_cipher_request(dd->engine, req, err);
+ crypto_finalize_ablkcipher_request(dd->engine, req, err);
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
@@ -520,14 +521,15 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
struct ablkcipher_request *req)
{
if (req)
- return crypto_transfer_cipher_request_to_engine(dd->engine, req);
+ return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
return 0;
}
static int omap_des_prepare_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ void *areq)
{
+ struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
@@ -582,8 +584,9 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
}
static int omap_des_crypt_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ void *areq)
{
+ struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
@@ -695,12 +698,23 @@ static int omap_des_cbc_decrypt(struct ablkcipher_request *req)
return omap_des_crypt(req, FLAGS_CBC);
}
+static int omap_des_prepare_req(struct crypto_engine *engine,
+ void *areq);
+static int omap_des_crypt_req(struct crypto_engine *engine,
+ void *areq);
+
static int omap_des_cra_init(struct crypto_tfm *tfm)
{
+ struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm);
+
pr_debug("enter\n");
tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx);
+ ctx->enginectx.op.prepare_request = omap_des_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ ctx->enginectx.op.do_one_request = omap_des_crypt_req;
+
return 0;
}
@@ -1046,8 +1060,6 @@ static int omap_des_probe(struct platform_device *pdev)
goto err_engine;
}
- dd->engine->prepare_cipher_request = omap_des_prepare_req;
- dd->engine->cipher_one_request = omap_des_crypt_req;
err = crypto_engine_start(dd->engine);
if (err)
goto err_engine;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 86b89ace836f..ad02aa63b519 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -229,6 +229,7 @@ struct omap_sham_dev {
u8 xmit_buf[BUFLEN] OMAP_ALIGNED;
unsigned long flags;
+ int fallback_sz;
struct crypto_queue queue;
struct ahash_request *req;
@@ -759,6 +760,13 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
while (nbytes > 0 && sg_tmp) {
n++;
+#ifdef CONFIG_ZONE_DMA
+ if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
+ aligned = false;
+ break;
+ }
+#endif
+
if (offset < sg_tmp->length) {
if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
aligned = false;
@@ -809,9 +817,6 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
bool final = rctx->flags & BIT(FLAGS_FINUP);
int xmit_len, hash_later;
- if (!req)
- return 0;
-
bs = get_block_size(rctx);
if (update)
@@ -1002,7 +1007,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
if (ctx->total < get_block_size(ctx) ||
- ctx->total < OMAP_SHA_DMA_THRESHOLD)
+ ctx->total < dd->fallback_sz)
ctx->flags |= BIT(FLAGS_CPU);
if (ctx->flags & BIT(FLAGS_CPU))
@@ -1258,11 +1263,11 @@ static int omap_sham_final(struct ahash_request *req)
/*
* OMAP HW accel works only with buffers >= 9.
* HMAC is always >= 9 because ipad == block size.
- * If buffersize is less than DMA_THRESHOLD, we use fallback
+ * If buffersize is less than fallback_sz, we use fallback
* SW encoding, as using DMA + HW in this case doesn't provide
* any benefit.
*/
- if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD)
+ if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
return omap_sham_final_shash(req);
else if (ctx->bufcnt)
return omap_sham_enqueue(req, OP_FINAL);
@@ -1761,7 +1766,7 @@ static void omap_sham_done_task(unsigned long data)
if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
/* hash or semi-hash ready */
clear_bit(FLAGS_DMA_READY, &dd->flags);
- goto finish;
+ goto finish;
}
}
@@ -2013,6 +2018,85 @@ err:
return err;
}
+static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct omap_sham_dev *dd = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dd->fallback_sz);
+}
+
+static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_sham_dev *dd = dev_get_drvdata(dev);
+ ssize_t status;
+ long value;
+
+ status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
+
+ /* HW accelerator only works with buffers >= 9 */
+ if (value < 9) {
+ dev_err(dev, "minimum fallback size 9\n");
+ return -EINVAL;
+ }
+
+ dd->fallback_sz = value;
+
+ return size;
+}
+
+static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct omap_sham_dev *dd = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dd->queue.max_qlen);
+}
+
+static ssize_t queue_len_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct omap_sham_dev *dd = dev_get_drvdata(dev);
+ ssize_t status;
+ long value;
+ unsigned long flags;
+
+ status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
+
+ if (value < 1)
+ return -EINVAL;
+
+ /*
+ * Changing the queue size on the fly is safe: if the new size is
+ * smaller than the current size, the queue simply stops accepting
+ * new entries until it has shrunk enough.
+ */
+ spin_lock_irqsave(&dd->lock, flags);
+ dd->queue.max_qlen = value;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(queue_len);
+static DEVICE_ATTR_RW(fallback);
+
+static struct attribute *omap_sham_attrs[] = {
+ &dev_attr_queue_len.attr,
+ &dev_attr_fallback.attr,
+ NULL,
+};
+
+static struct attribute_group omap_sham_attr_group = {
+ .attrs = omap_sham_attrs,
+};
+
static int omap_sham_probe(struct platform_device *pdev)
{
struct omap_sham_dev *dd;
@@ -2074,6 +2158,8 @@ static int omap_sham_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+ dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
+
pm_runtime_enable(dev);
pm_runtime_irq_safe(dev);
@@ -2111,6 +2197,12 @@ static int omap_sham_probe(struct platform_device *pdev)
}
}
+ err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
+ if (err) {
+ dev_err(dev, "could not create sysfs device attrs\n");
+ goto err_algs;
+ }
+
return 0;
err_algs:
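The fallback and queue_len attributes added above are ordinary sysfs files, so the thresholds can be tuned at run time from user space. A hedged illustration in C (the device path is an assumption and differs per board; any path under the probed omap-sham or omap-aes platform device works):

#include <stdio.h>

int main(void)
{
	/* Hypothetical sysfs path; look yours up under /sys/bus/platform/devices. */
	const char *path = "/sys/bus/platform/devices/4b101000.sham/fallback";
	FILE *f = fopen(path, "r+");
	int cur;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &cur) == 1)
		printf("current fallback_sz: %d\n", cur);	/* read current threshold */
	rewind(f);
	fprintf(f, "%d\n", 512);	/* new threshold; the store rejects values < 9 */
	fclose(f);
	return 0;
}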
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 4ef52c9d72fc..a4df966adbf6 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -499,10 +499,12 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
ctx->hash_key_len = keys.authkeylen;
+ memzero_explicit(&keys, sizeof(keys));
return 0;
badkey:
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
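This hunk, like the qat and talitos hunks below, wipes the on-stack copy of the split authenc keys with memzero_explicit(), which the compiler may not elide the way it can elide a memset() of an object that is dead afterwards. A generic sketch of the pattern (not lifted from any one of these drivers):

#include <linux/string.h>
#include <crypto/authenc.h>

static int example_authenc_setkey(void *tfm_ctx, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (err)
		goto out;

	/* ... copy keys.enckey / keys.authkey into tfm_ctx ... */
out:
	/* Always clear the plaintext key material left on the stack. */
	memzero_explicit(&keys, sizeof(keys));
	return err;
}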
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index baffae817259..1138e41d6805 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -546,11 +546,14 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
goto error;
+ memzero_explicit(&keys, sizeof(keys));
return 0;
bad_key:
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
error:
+ memzero_explicit(&keys, sizeof(keys));
return -EFAULT;
}
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 13c52d6bf630..320e7854b4ee 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -969,7 +969,8 @@ unmap_src:
return ret;
}
-int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
+static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
+ size_t vlen)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
@@ -1000,7 +1001,8 @@ err:
return ret;
}
-int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
+static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
+ size_t vlen)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
@@ -1024,7 +1026,8 @@ int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
return 0;
}
-int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
+static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
+ size_t vlen)
{
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 5d64c08b7f47..bf7163042569 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -404,29 +404,31 @@ static const struct of_device_id s5p_sss_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
-static inline struct samsung_aes_variant *find_s5p_sss_version
- (struct platform_device *pdev)
+static inline const struct samsung_aes_variant *find_s5p_sss_version
+ (const struct platform_device *pdev)
{
if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
const struct of_device_id *match;
match = of_match_node(s5p_sss_dt_match,
pdev->dev.of_node);
- return (struct samsung_aes_variant *)match->data;
+ return (const struct samsung_aes_variant *)match->data;
}
- return (struct samsung_aes_variant *)
+ return (const struct samsung_aes_variant *)
platform_get_device_id(pdev)->driver_data;
}
static struct s5p_aes_dev *s5p_dev;
-static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
+ const struct scatterlist *sg)
{
SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}
-static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
+ const struct scatterlist *sg)
{
SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
@@ -619,7 +621,7 @@ static inline void s5p_hash_write(struct s5p_aes_dev *dd,
* @sg: scatterlist ready to DMA transmit
*/
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
- struct scatterlist *sg)
+ const struct scatterlist *sg)
{
dev->hash_sg_cnt--;
SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
@@ -792,9 +794,9 @@ static void s5p_hash_read_msg(struct ahash_request *req)
* @ctx: request context
*/
static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
- struct s5p_hash_reqctx *ctx)
+ const struct s5p_hash_reqctx *ctx)
{
- u32 *hash = (u32 *)ctx->digest;
+ const u32 *hash = (const u32 *)ctx->digest;
unsigned int i;
for (i = 0; i < ctx->nregs; i++)
@@ -818,7 +820,7 @@ static void s5p_hash_write_iv(struct ahash_request *req)
*/
static void s5p_hash_copy_result(struct ahash_request *req)
{
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
if (!req->result)
return;
@@ -1210,9 +1212,6 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
int xmit_len, hash_later, nbytes;
int ret;
- if (!req)
- return 0;
-
if (update)
nbytes = req->nbytes;
else
@@ -1293,7 +1292,7 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
*/
static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
{
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+ const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
@@ -1720,7 +1719,7 @@ static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
*/
static int s5p_hash_export(struct ahash_request *req, void *out)
{
- struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
@@ -1834,7 +1833,8 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
};
static void s5p_set_aes(struct s5p_aes_dev *dev,
- uint8_t *key, uint8_t *iv, unsigned int keylen)
+ const uint8_t *key, const uint8_t *iv,
+ unsigned int keylen)
{
void __iomem *keystart;
@@ -2153,7 +2153,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int i, j, err = -ENODEV;
- struct samsung_aes_variant *variant;
+ const struct samsung_aes_variant *variant;
struct s5p_aes_dev *pdata;
struct resource *res;
unsigned int hash_i;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 08e7bdcaa6e3..0f2245e1af2b 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1397,11 +1397,9 @@ static int sahara_probe(struct platform_device *pdev)
int err;
int i;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
- if (dev == NULL) {
- dev_err(&pdev->dev, "unable to alloc data struct.\n");
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
return -ENOMEM;
- }
dev->device = &pdev->dev;
platform_set_drvdata(pdev, dev);
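The sahara change applies the usual allocation idiom: size the allocation from the pointer with sizeof(*dev) so it cannot go stale if the type changes, and drop the error message because the allocator already warns on failure. The same pattern in isolation (names illustrative):

	struct example_dev *edev;

	edev = devm_kzalloc(&pdev->dev, sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return -ENOMEM;	/* no dev_err(): the MM core already complains */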
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 4a06a7a665ee..c5d3efc54a4f 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -17,6 +17,7 @@
#include <crypto/des.h>
#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
#define DRIVER_NAME "stm32-cryp"
@@ -29,8 +30,12 @@
#define FLG_ECB BIT(4)
#define FLG_CBC BIT(5)
#define FLG_CTR BIT(6)
+#define FLG_GCM BIT(7)
+#define FLG_CCM BIT(8)
/* Mode mask = bits [15..0] */
#define FLG_MODE_MASK GENMASK(15, 0)
+/* Bit [31..16] status */
+#define FLG_CCM_PADDED_WA BIT(16)
/* Registers */
#define CRYP_CR 0x00000000
@@ -53,6 +58,8 @@
#define CRYP_IV0RR 0x00000044
#define CRYP_IV1LR 0x00000048
#define CRYP_IV1RR 0x0000004C
+#define CRYP_CSGCMCCM0R 0x00000050
+#define CRYP_CSGCM0R 0x00000070
/* Registers values */
#define CR_DEC_NOT_ENC 0x00000004
@@ -64,6 +71,8 @@
#define CR_AES_CBC 0x00000028
#define CR_AES_CTR 0x00000030
#define CR_AES_KP 0x00000038
+#define CR_AES_GCM 0x00080000
+#define CR_AES_CCM 0x00080008
#define CR_AES_UNKNOWN 0xFFFFFFFF
#define CR_ALGO_MASK 0x00080038
#define CR_DATA32 0x00000000
@@ -75,6 +84,12 @@
#define CR_KEY256 0x00000200
#define CR_FFLUSH 0x00004000
#define CR_CRYPEN 0x00008000
+#define CR_PH_INIT 0x00000000
+#define CR_PH_HEADER 0x00010000
+#define CR_PH_PAYLOAD 0x00020000
+#define CR_PH_FINAL 0x00030000
+#define CR_PH_MASK 0x00030000
+#define CR_NBPBL_SHIFT 20
#define SR_BUSY 0x00000010
#define SR_OFNE 0x00000004
@@ -87,10 +102,17 @@
/* Misc */
#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
+#define GCM_CTR_INIT 2
#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+struct stm32_cryp_caps {
+ bool swap_final;
+ bool padding_wa;
+};
+
struct stm32_cryp_ctx {
+ struct crypto_engine_ctx enginectx;
struct stm32_cryp *cryp;
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
@@ -108,13 +130,16 @@ struct stm32_cryp {
struct clk *clk;
unsigned long flags;
u32 irq_status;
+ const struct stm32_cryp_caps *caps;
struct stm32_cryp_ctx *ctx;
struct crypto_engine *engine;
- struct mutex lock; /* protects req */
+ struct mutex lock; /* protects req / areq */
struct ablkcipher_request *req;
+ struct aead_request *areq;
+ size_t authsize;
size_t hw_blocksize;
size_t total_in;
@@ -137,6 +162,7 @@ struct stm32_cryp {
struct scatter_walk out_walk;
u32 last_ctr[4];
+ u32 gcm_ctr;
};
struct stm32_cryp_list {
@@ -179,6 +205,16 @@ static inline bool is_ctr(struct stm32_cryp *cryp)
return cryp->flags & FLG_CTR;
}
+static inline bool is_gcm(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_GCM;
+}
+
+static inline bool is_ccm(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CCM;
+}
+
static inline bool is_encrypt(struct stm32_cryp *cryp)
{
return cryp->flags & FLG_ENCRYPT;
@@ -207,6 +243,24 @@ static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
!(status & SR_BUSY), 10, 100000);
}
+static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->regs + CRYP_CR, status,
+ !(status & CR_CRYPEN), 10, 100000);
+}
+
+static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+ status & SR_OFNE, 10, 100000);
+}
+
+static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp);
+
static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
{
struct stm32_cryp *tmp, *cryp = NULL;
@@ -365,6 +419,12 @@ static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
if (is_aes(cryp) && is_ctr(cryp))
return CR_AES_CTR;
+ if (is_aes(cryp) && is_gcm(cryp))
+ return CR_AES_GCM;
+
+ if (is_aes(cryp) && is_ccm(cryp))
+ return CR_AES_CCM;
+
if (is_des(cryp) && is_ecb(cryp))
return CR_DES_ECB;
@@ -381,6 +441,79 @@ static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
return CR_AES_UNKNOWN;
}
+static unsigned int stm32_cryp_get_input_text_len(struct stm32_cryp *cryp)
+{
+ return is_encrypt(cryp) ? cryp->areq->cryptlen :
+ cryp->areq->cryptlen - cryp->authsize;
+}
+
+static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg)
+{
+ int ret;
+ u32 iv[4];
+
+ /* Phase 1 : init */
+ memcpy(iv, cryp->areq->iv, 12);
+ iv[3] = cpu_to_be32(GCM_CTR_INIT);
+ cryp->gcm_ctr = GCM_CTR_INIT;
+ stm32_cryp_hw_write_iv(cryp, iv);
+
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN);
+
+ /* Wait for end of processing */
+ ret = stm32_cryp_wait_enable(cryp);
+ if (ret)
+ dev_err(cryp->dev, "Timeout (gcm init)\n");
+
+ return ret;
+}
+
+static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
+{
+ int ret;
+ u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
+ u32 *d;
+ unsigned int i, textlen;
+
+ /* Phase 1 : init. Firstly set the CTR value to 1 (not 0) */
+ memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
+ memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
+ iv[AES_BLOCK_SIZE - 1] = 1;
+ stm32_cryp_hw_write_iv(cryp, (u32 *)iv);
+
+ /* Build B0 */
+ memcpy(b0, iv, AES_BLOCK_SIZE);
+
+ b0[0] |= (8 * ((cryp->authsize - 2) / 2));
+
+ if (cryp->areq->assoclen)
+ b0[0] |= 0x40;
+
+ textlen = stm32_cryp_get_input_text_len(cryp);
+
+ b0[AES_BLOCK_SIZE - 2] = textlen >> 8;
+ b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF;
+
+ /* Enable HW */
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN);
+
+ /* Write B0 */
+ d = (u32 *)b0;
+
+ for (i = 0; i < AES_BLOCK_32; i++) {
+ if (!cryp->caps->padding_wa)
+ *d = cpu_to_be32(*d);
+ stm32_cryp_write(cryp, CRYP_DIN, *d++);
+ }
+
+ /* Wait for end of processing */
+ ret = stm32_cryp_wait_enable(cryp);
+ if (ret)
+ dev_err(cryp->dev, "Timeout (ccm init)\n");
+
+ return ret;
+}
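The flags byte assembled in stm32_cryp_ccm_init() above follows the CCM B0 layout of RFC 3610 ([reserved | Adata | M' | L']); a small worked example with assumed values, authsize = 8, a non-zero assoclen and iv[0] = 3 (i.e. L = 4):

	u8 flags = 3;			/* L' = L - 1, taken from iv[0] by the memcpy */
	flags |= 8 * ((8 - 2) / 2);	/* M' = (authsize - 2) / 2, placed in bits 5..3 -> 0x18 */
	flags |= 0x40;			/* Adata bit, since assoclen != 0 */
	/* flags == 0x5b */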
+
static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
{
int ret;
@@ -436,6 +569,29 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_CR, cfg);
switch (hw_mode) {
+ case CR_AES_GCM:
+ case CR_AES_CCM:
+ /* Phase 1 : init */
+ if (hw_mode == CR_AES_CCM)
+ ret = stm32_cryp_ccm_init(cryp, cfg);
+ else
+ ret = stm32_cryp_gcm_init(cryp, cfg);
+
+ if (ret)
+ return ret;
+
+ /* Phase 2 : header (authenticated data) */
+ if (cryp->areq->assoclen) {
+ cfg |= CR_PH_HEADER;
+ } else if (stm32_cryp_get_input_text_len(cryp)) {
+ cfg |= CR_PH_PAYLOAD;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else {
+ cfg |= CR_PH_INIT;
+ }
+
+ break;
+
case CR_DES_CBC:
case CR_TDES_CBC:
case CR_AES_CBC:
@@ -452,12 +608,16 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_CR, cfg);
+ cryp->flags &= ~FLG_CCM_PADDED_WA;
+
return 0;
}
-static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
{
- int err = 0;
+ if (!err && (is_gcm(cryp) || is_ccm(cryp)))
+ /* Phase 4 : output tag */
+ err = stm32_cryp_read_auth_tag(cryp);
if (cryp->sgs_copied) {
void *buf_in, *buf_out;
@@ -478,8 +638,14 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
free_pages((unsigned long)buf_out, pages);
}
- crypto_finalize_cipher_request(cryp->engine, cryp->req, err);
- cryp->req = NULL;
+ if (is_gcm(cryp) || is_ccm(cryp)) {
+ crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
+ cryp->areq = NULL;
+ } else {
+ crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
+ err);
+ cryp->req = NULL;
+ }
memset(cryp->ctx->key, 0, cryp->ctx->keylen);
@@ -494,10 +660,36 @@ static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
return 0;
}
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
+static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
+ void *areq);
+
static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
{
+ struct stm32_cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+
tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+ ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req;
+ ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+ return 0;
+}
+
+static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq);
+static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine,
+ void *areq);
+
+static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
+{
+ struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct stm32_cryp_reqctx);
+
+ ctx->enginectx.op.do_one_request = stm32_cryp_aead_one_req;
+ ctx->enginectx.op.prepare_request = stm32_cryp_prepare_aead_req;
+ ctx->enginectx.op.unprepare_request = NULL;
+
return 0;
}
@@ -513,7 +705,21 @@ static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx->mode = mode;
- return crypto_transfer_cipher_request_to_engine(cryp->engine, req);
+ return crypto_transfer_ablkcipher_request_to_engine(cryp->engine, req);
+}
+
+static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
+{
+ struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = aead_request_ctx(req);
+ struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
+
+ if (!cryp)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return crypto_transfer_aead_request_to_engine(cryp->engine, req);
}
static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -555,6 +761,46 @@ static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return stm32_cryp_setkey(tfm, key, keylen);
}
+static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL;
+}
+
+static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
@@ -585,6 +831,26 @@ static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
}
+static int stm32_cryp_aes_gcm_encrypt(struct aead_request *req)
+{
+ return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req)
+{
+ return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM);
+}
+
+static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req)
+{
+ return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
+{
+ return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
+}
+
static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
{
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
@@ -625,18 +891,19 @@ static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
}
-static int stm32_cryp_prepare_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
+ struct aead_request *areq)
{
struct stm32_cryp_ctx *ctx;
struct stm32_cryp *cryp;
struct stm32_cryp_reqctx *rctx;
int ret;
- if (!req)
+ if (!req && !areq)
return -EINVAL;
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ ctx = req ? crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)) :
+ crypto_aead_ctx(crypto_aead_reqtfm(areq));
cryp = ctx->cryp;
@@ -645,7 +912,7 @@ static int stm32_cryp_prepare_req(struct crypto_engine *engine,
mutex_lock(&cryp->lock);
- rctx = ablkcipher_request_ctx(req);
+ rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
ctx->cryp = cryp;
@@ -654,15 +921,48 @@ static int stm32_cryp_prepare_req(struct crypto_engine *engine,
cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
cryp->ctx = ctx;
- cryp->req = req;
- cryp->total_in = req->nbytes;
- cryp->total_out = cryp->total_in;
+ if (req) {
+ cryp->req = req;
+ cryp->total_in = req->nbytes;
+ cryp->total_out = cryp->total_in;
+ } else {
+ /*
+ * Length of input and output data:
+ * Encryption case:
+ * INPUT = AssocData || PlainText
+ * <- assoclen -> <- cryptlen ->
+ * <------- total_in ----------->
+ *
+ * OUTPUT = AssocData || CipherText || AuthTag
+ * <- assoclen -> <- cryptlen -> <- authsize ->
+ * <---------------- total_out ----------------->
+ *
+ * Decryption case:
+ * INPUT = AssocData || CipherText || AuthTag
+ * <- assoclen -> <--------- cryptlen --------->
+ * <- authsize ->
+ * <---------------- total_in ------------------>
+ *
+ * OUTPUT = AssocData || PlainText
+ * <- assoclen -> <- cryptlen - authsize ->
+ * <---------- total_out ----------------->
+ */
+ cryp->areq = areq;
+ cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
+ cryp->total_in = areq->assoclen + areq->cryptlen;
+ if (is_encrypt(cryp))
+ /* Append auth tag to output */
+ cryp->total_out = cryp->total_in + cryp->authsize;
+ else
+ /* No auth tag in output */
+ cryp->total_out = cryp->total_in - cryp->authsize;
+ }
cryp->total_in_save = cryp->total_in;
cryp->total_out_save = cryp->total_out;
- cryp->in_sg = req->src;
- cryp->out_sg = req->dst;
+ cryp->in_sg = req ? req->src : areq->src;
+ cryp->out_sg = req ? req->dst : areq->dst;
cryp->out_sg_save = cryp->out_sg;
cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
@@ -686,6 +986,12 @@ static int stm32_cryp_prepare_req(struct crypto_engine *engine,
scatterwalk_start(&cryp->in_walk, cryp->in_sg);
scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+ if (is_gcm(cryp) || is_ccm(cryp)) {
+ /* In output, jump after assoc data */
+ scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen);
+ cryp->total_out -= cryp->areq->assoclen;
+ }
+
ret = stm32_cryp_hw_init(cryp);
out:
if (ret)
@@ -695,14 +1001,20 @@ out:
}
static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ void *areq)
{
- return stm32_cryp_prepare_req(engine, req);
+ struct ablkcipher_request *req = container_of(areq,
+ struct ablkcipher_request,
+ base);
+
+ return stm32_cryp_prepare_req(req, NULL);
}
-static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
- struct ablkcipher_request *req)
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
{
+ struct ablkcipher_request *req = container_of(areq,
+ struct ablkcipher_request,
+ base);
struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct stm32_cryp *cryp = ctx->cryp;
@@ -713,6 +1025,34 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
return stm32_cryp_cpu_start(cryp);
}
+static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+
+ return stm32_cryp_prepare_req(NULL, req);
+}
+
+static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct stm32_cryp *cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ if (unlikely(!cryp->areq->assoclen &&
+ !stm32_cryp_get_input_text_len(cryp))) {
+ /* No input data to process: get tag and finish */
+ stm32_cryp_finish_req(cryp, 0);
+ return 0;
+ }
+
+ return stm32_cryp_cpu_start(cryp);
+}
+
static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
unsigned int n)
{
@@ -745,6 +1085,111 @@ static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
return (u32 *)((u8 *)src + n);
}
+static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
+{
+ u32 cfg, size_bit, *dst, d32;
+ u8 *d8;
+ unsigned int i, j;
+ int ret = 0;
+
+ /* Update Config */
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_FINAL;
+ cfg &= ~CR_DEC_NOT_ENC;
+ cfg |= CR_CRYPEN;
+
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ if (is_gcm(cryp)) {
+ /* GCM: write aad and payload size (in bits) */
+ size_bit = cryp->areq->assoclen * 8;
+ if (cryp->caps->swap_final)
+ size_bit = cpu_to_be32(size_bit);
+
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+ stm32_cryp_write(cryp, CRYP_DIN, size_bit);
+
+ size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen :
+ cryp->areq->cryptlen - AES_BLOCK_SIZE;
+ size_bit *= 8;
+ if (cryp->caps->swap_final)
+ size_bit = cpu_to_be32(size_bit);
+
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+ stm32_cryp_write(cryp, CRYP_DIN, size_bit);
+ } else {
+ /* CCM: write CTR0 */
+ u8 iv[AES_BLOCK_SIZE];
+ u32 *iv32 = (u32 *)iv;
+
+ memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
+ memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
+
+ for (i = 0; i < AES_BLOCK_32; i++) {
+ if (!cryp->caps->padding_wa)
+ *iv32 = cpu_to_be32(*iv32);
+ stm32_cryp_write(cryp, CRYP_DIN, *iv32++);
+ }
+ }
+
+ /* Wait for output data */
+ ret = stm32_cryp_wait_output(cryp);
+ if (ret) {
+ dev_err(cryp->dev, "Timeout (read tag)\n");
+ return ret;
+ }
+
+ if (is_encrypt(cryp)) {
+ /* Get and write tag */
+ dst = sg_virt(cryp->out_sg) + _walked_out;
+
+ for (i = 0; i < AES_BLOCK_32; i++) {
+ if (cryp->total_out >= sizeof(u32)) {
+ /* Read a full u32 */
+ *dst = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ dst = stm32_cryp_next_out(cryp, dst,
+ sizeof(u32));
+ cryp->total_out -= sizeof(u32);
+ } else if (!cryp->total_out) {
+ /* Empty fifo out (data from input padding) */
+ stm32_cryp_read(cryp, CRYP_DOUT);
+ } else {
+ /* Read less than an u32 */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ d8 = (u8 *)&d32;
+
+ for (j = 0; j < cryp->total_out; j++) {
+ *((u8 *)dst) = *(d8++);
+ dst = stm32_cryp_next_out(cryp, dst, 1);
+ }
+ cryp->total_out = 0;
+ }
+ }
+ } else {
+ /* Get and check tag */
+ u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
+
+ scatterwalk_map_and_copy(in_tag, cryp->in_sg,
+ cryp->total_in_save - cryp->authsize,
+ cryp->authsize, 0);
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ if (crypto_memneq(in_tag, out_tag, cryp->authsize))
+ ret = -EBADMSG;
+ }
+
+ /* Disable cryp */
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ return ret;
+}
+
static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
{
u32 cr;
@@ -777,17 +1222,24 @@ static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
unsigned int i, j;
u32 d32, *dst;
u8 *d8;
+ size_t tag_size;
+
+ /* Do not read the tag now (if any) */
+ if (is_encrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
+ tag_size = cryp->authsize;
+ else
+ tag_size = 0;
dst = sg_virt(cryp->out_sg) + _walked_out;
for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
- if (likely(cryp->total_out >= sizeof(u32))) {
+ if (likely(cryp->total_out - tag_size >= sizeof(u32))) {
/* Read a full u32 */
*dst = stm32_cryp_read(cryp, CRYP_DOUT);
dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
cryp->total_out -= sizeof(u32);
- } else if (!cryp->total_out) {
+ } else if (cryp->total_out == tag_size) {
/* Empty fifo out (data from input padding) */
d32 = stm32_cryp_read(cryp, CRYP_DOUT);
} else {
@@ -795,15 +1247,15 @@ static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
d32 = stm32_cryp_read(cryp, CRYP_DOUT);
d8 = (u8 *)&d32;
- for (j = 0; j < cryp->total_out; j++) {
+ for (j = 0; j < cryp->total_out - tag_size; j++) {
*((u8 *)dst) = *(d8++);
dst = stm32_cryp_next_out(cryp, dst, 1);
}
- cryp->total_out = 0;
+ cryp->total_out = tag_size;
}
}
- return !cryp->total_out || !cryp->total_in;
+ return !(cryp->total_out - tag_size) || !cryp->total_in;
}
static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
@@ -811,33 +1263,219 @@ static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
unsigned int i, j;
u32 *src;
u8 d8[4];
+ size_t tag_size;
+
+ /* Do not write the tag (if any) */
+ if (is_decrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
+ tag_size = cryp->authsize;
+ else
+ tag_size = 0;
src = sg_virt(cryp->in_sg) + _walked_in;
for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
- if (likely(cryp->total_in >= sizeof(u32))) {
+ if (likely(cryp->total_in - tag_size >= sizeof(u32))) {
/* Write a full u32 */
stm32_cryp_write(cryp, CRYP_DIN, *src);
src = stm32_cryp_next_in(cryp, src, sizeof(u32));
cryp->total_in -= sizeof(u32);
- } else if (!cryp->total_in) {
+ } else if (cryp->total_in == tag_size) {
/* Write padding data */
stm32_cryp_write(cryp, CRYP_DIN, 0);
} else {
/* Write less than an u32 */
memset(d8, 0, sizeof(u32));
- for (j = 0; j < cryp->total_in; j++) {
+ for (j = 0; j < cryp->total_in - tag_size; j++) {
d8[j] = *((u8 *)src);
src = stm32_cryp_next_in(cryp, src, 1);
}
stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
- cryp->total_in = 0;
+ cryp->total_in = tag_size;
}
}
}
+static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
+{
+ int err;
+ u32 cfg, tmp[AES_BLOCK_32];
+ size_t total_in_ori = cryp->total_in;
+ struct scatterlist *out_sg_ori = cryp->out_sg;
+ unsigned int i;
+
+ /* 'Special workaround' procedure described in the datasheet */
+
+ /* a) disable ip */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* b) Update IV1R */
+ stm32_cryp_write(cryp, CRYP_IV1RR, cryp->gcm_ctr - 2);
+
+ /* c) change mode to CTR */
+ cfg &= ~CR_ALGO_MASK;
+ cfg |= CR_AES_CTR;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* a) enable IP */
+ cfg |= CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* b) pad and write the last block */
+ stm32_cryp_irq_write_block(cryp);
+ cryp->total_in = total_in_ori;
+ err = stm32_cryp_wait_output(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (write gcm padded data)\n");
+ return stm32_cryp_finish_req(cryp, err);
+ }
+
+ /* c) get and store encrypted data */
+ stm32_cryp_irq_read_data(cryp);
+ scatterwalk_map_and_copy(tmp, out_sg_ori,
+ cryp->total_in_save - total_in_ori,
+ total_in_ori, 0);
+
+ /* d) change mode back to AES GCM */
+ cfg &= ~CR_ALGO_MASK;
+ cfg |= CR_AES_GCM;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* e) change phase to Final */
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_FINAL;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* f) write padded data */
+ for (i = 0; i < AES_BLOCK_32; i++) {
+ if (cryp->total_in)
+ stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
+ else
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+
+ cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
+ }
+
+ /* g) Empty fifo out */
+ err = stm32_cryp_wait_output(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (write gcm padded data)\n");
+ return stm32_cryp_finish_req(cryp, err);
+ }
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ stm32_cryp_read(cryp, CRYP_DOUT);
+
+ /* h) run the normal Final phase */
+ stm32_cryp_finish_req(cryp, 0);
+}
+
+static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp)
+{
+ u32 cfg, payload_bytes;
+
+ /* disable ip, set NPBLB and re-enable ip */
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ payload_bytes = is_decrypt(cryp) ? cryp->total_in - cryp->authsize :
+ cryp->total_in;
+ cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT;
+ cfg |= CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+}
+
+static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
+{
+ int err = 0;
+ u32 cfg, iv1tmp;
+ u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32];
+ size_t last_total_out, total_in_ori = cryp->total_in;
+ struct scatterlist *out_sg_ori = cryp->out_sg;
+ unsigned int i;
+
+ /* 'Special workaround' procedure described in the datasheet */
+ cryp->flags |= FLG_CCM_PADDED_WA;
+
+ /* a) disable ip */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* b) get IV1 from CRYP_CSGCMCCM7 */
+ iv1tmp = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + 7 * 4);
+
+ /* c) Load CRYP_CSGCMCCMxR */
+ for (i = 0; i < ARRAY_SIZE(cstmp1); i++)
+ cstmp1[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4);
+
+ /* d) Write IV1R */
+ stm32_cryp_write(cryp, CRYP_IV1RR, iv1tmp);
+
+ /* e) change mode to CTR */
+ cfg &= ~CR_ALGO_MASK;
+ cfg |= CR_AES_CTR;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* a) enable IP */
+ cfg |= CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* b) pad and write the last block */
+ stm32_cryp_irq_write_block(cryp);
+ cryp->total_in = total_in_ori;
+ err = stm32_cryp_wait_output(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (write ccm padded data)\n");
+ return stm32_cryp_finish_req(cryp, err);
+ }
+
+ /* c) get and store decrypted data */
+ last_total_out = cryp->total_out;
+ stm32_cryp_irq_read_data(cryp);
+
+ memset(tmp, 0, sizeof(tmp));
+ scatterwalk_map_and_copy(tmp, out_sg_ori,
+ cryp->total_out_save - last_total_out,
+ last_total_out, 0);
+
+ /* d) Load again CRYP_CSGCMCCMxR */
+ for (i = 0; i < ARRAY_SIZE(cstmp2); i++)
+ cstmp2[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4);
+
+ /* e) change mode back to AES CCM */
+ cfg &= ~CR_ALGO_MASK;
+ cfg |= CR_AES_CCM;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* f) change phase to header */
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_HEADER;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* g) XOR and write padded data */
+ for (i = 0; i < ARRAY_SIZE(tmp); i++) {
+ tmp[i] ^= cstmp1[i];
+ tmp[i] ^= cstmp2[i];
+ stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
+ }
+
+ /* h) wait for completion */
+ err = stm32_cryp_wait_busy(cryp);
+ if (err)
+ dev_err(cryp->dev, "Timeout (write ccm padded data)\n");
+
+ /* i) run the normal Final phase */
+ stm32_cryp_finish_req(cryp, err);
+}
+
static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
{
if (unlikely(!cryp->total_in)) {
@@ -845,28 +1483,220 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
return;
}
+ if (unlikely(cryp->total_in < AES_BLOCK_SIZE &&
+ (stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) &&
+ is_encrypt(cryp))) {
+ /* Padding for AES GCM encryption */
+ if (cryp->caps->padding_wa)
+ /* Special case 1 */
+ return stm32_cryp_irq_write_gcm_padded_data(cryp);
+
+ /* Setting padding bytes (NPBLB) */
+ stm32_cryp_irq_set_npblb(cryp);
+ }
+
+ if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) &&
+ (stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) &&
+ is_decrypt(cryp))) {
+ /* Padding for AES CCM decryption */
+ if (cryp->caps->padding_wa)
+ /* Special case 2 */
+ return stm32_cryp_irq_write_ccm_padded_data(cryp);
+
+ /* Setting padding bytes (NPBLB) */
+ stm32_cryp_irq_set_npblb(cryp);
+ }
+
if (is_aes(cryp) && is_ctr(cryp))
stm32_cryp_check_ctr_counter(cryp);
stm32_cryp_irq_write_block(cryp);
}
+static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp)
+{
+ int err;
+ unsigned int i, j;
+ u32 cfg, *src;
+
+ src = sg_virt(cryp->in_sg) + _walked_in;
+
+ for (i = 0; i < AES_BLOCK_32; i++) {
+ stm32_cryp_write(cryp, CRYP_DIN, *src);
+
+ src = stm32_cryp_next_in(cryp, src, sizeof(u32));
+ cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
+
+ /* Check if whole header written */
+ if ((cryp->total_in_save - cryp->total_in) ==
+ cryp->areq->assoclen) {
+ /* Write padding if needed */
+ for (j = i + 1; j < AES_BLOCK_32; j++)
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+
+ /* Wait for completion */
+ err = stm32_cryp_wait_busy(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (gcm header)\n");
+ return stm32_cryp_finish_req(cryp, err);
+ }
+
+ if (stm32_cryp_get_input_text_len(cryp)) {
+ /* Phase 3 : payload */
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_PAYLOAD;
+ cfg |= CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else {
+ /* Phase 4 : tag */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp, 0);
+ }
+
+ break;
+ }
+
+ if (!cryp->total_in)
+ break;
+ }
+}
+
+static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp)
+{
+ int err;
+ unsigned int i = 0, j, k;
+ u32 alen, cfg, *src;
+ u8 d8[4];
+
+ src = sg_virt(cryp->in_sg) + _walked_in;
+ alen = cryp->areq->assoclen;
+
+ if (!_walked_in) {
+ if (cryp->areq->assoclen <= 65280) {
+ /* Write first u32 of B1 */
+ d8[0] = (alen >> 8) & 0xFF;
+ d8[1] = alen & 0xFF;
+ d8[2] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+ d8[3] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ i++;
+
+ cryp->total_in -= min_t(size_t, 2, cryp->total_in);
+ } else {
+ /* Build the two first u32 of B1 */
+ d8[0] = 0xFF;
+ d8[1] = 0xFE;
+ d8[2] = (alen & 0xFF000000) >> 24;
+ d8[3] = (alen & 0x00FF0000) >> 16;
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ i++;
+
+ d8[0] = (alen & 0x0000FF00) >> 8;
+ d8[1] = alen & 0x000000FF;
+ d8[2] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+ d8[3] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ i++;
+
+ cryp->total_in -= min_t(size_t, 2, cryp->total_in);
+ }
+ }
+
+ /* Write next u32 */
+ for (; i < AES_BLOCK_32; i++) {
+ /* Build an u32 */
+ memset(d8, 0, sizeof(u32));
+ for (k = 0; k < sizeof(u32); k++) {
+ d8[k] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+
+ cryp->total_in -= min_t(size_t, 1, cryp->total_in);
+ if ((cryp->total_in_save - cryp->total_in) == alen)
+ break;
+ }
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+
+ if ((cryp->total_in_save - cryp->total_in) == alen) {
+ /* Write padding if needed */
+ for (j = i + 1; j < AES_BLOCK_32; j++)
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+
+ /* Wait for completion */
+ err = stm32_cryp_wait_busy(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (ccm header)\n");
+ return stm32_cryp_finish_req(cryp, err);
+ }
+
+ if (stm32_cryp_get_input_text_len(cryp)) {
+ /* Phase 3 : payload */
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_PAYLOAD;
+ cfg |= CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else {
+ /* Phase 4 : tag */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp, 0);
+ }
+
+ break;
+ }
+ }
+}
+
static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
{
struct stm32_cryp *cryp = arg;
+ u32 ph;
if (cryp->irq_status & MISR_OUT)
/* Output FIFO IRQ: read data */
if (unlikely(stm32_cryp_irq_read_data(cryp))) {
/* All bytes processed, finish */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- stm32_cryp_finish_req(cryp);
+ stm32_cryp_finish_req(cryp, 0);
return IRQ_HANDLED;
}
if (cryp->irq_status & MISR_IN) {
- /* Input FIFO IRQ: write data */
- stm32_cryp_irq_write_data(cryp);
+ if (is_gcm(cryp)) {
+ ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
+ if (unlikely(ph == CR_PH_HEADER))
+ /* Write Header */
+ stm32_cryp_irq_write_gcm_header(cryp);
+ else
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ cryp->gcm_ctr++;
+ } else if (is_ccm(cryp)) {
+ ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
+ if (unlikely(ph == CR_PH_HEADER))
+ /* Write Header */
+ stm32_cryp_irq_write_ccm_header(cryp);
+ else
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ } else {
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ }
}
return IRQ_HANDLED;
@@ -1028,8 +1858,62 @@ static struct crypto_alg crypto_algs[] = {
},
};
+static struct aead_alg aead_algs[] = {
+{
+ .setkey = stm32_cryp_aes_aead_setkey,
+ .setauthsize = stm32_cryp_aes_gcm_setauthsize,
+ .encrypt = stm32_cryp_aes_gcm_encrypt,
+ .decrypt = stm32_cryp_aes_gcm_decrypt,
+ .init = stm32_cryp_aes_aead_init,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "stm32-gcm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+},
+{
+ .setkey = stm32_cryp_aes_aead_setkey,
+ .setauthsize = stm32_cryp_aes_ccm_setauthsize,
+ .encrypt = stm32_cryp_aes_ccm_encrypt,
+ .decrypt = stm32_cryp_aes_ccm_decrypt,
+ .init = stm32_cryp_aes_aead_init,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "stm32-ccm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+},
+};
+
+static const struct stm32_cryp_caps f7_data = {
+ .swap_final = true,
+ .padding_wa = true,
+};
+
+static const struct stm32_cryp_caps mp1_data = {
+ .swap_final = false,
+ .padding_wa = false,
+};
+
static const struct of_device_id stm32_dt_ids[] = {
- { .compatible = "st,stm32f756-cryp", },
+ { .compatible = "st,stm32f756-cryp", .data = &f7_data},
+ { .compatible = "st,stm32mp1-cryp", .data = &mp1_data},
{},
};
MODULE_DEVICE_TABLE(of, stm32_dt_ids);
@@ -1046,6 +1930,10 @@ static int stm32_cryp_probe(struct platform_device *pdev)
if (!cryp)
return -ENOMEM;
+ cryp->caps = of_device_get_match_data(dev);
+ if (!cryp->caps)
+ return -ENODEV;
+
cryp->dev = dev;
mutex_init(&cryp->lock);
@@ -1102,9 +1990,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
goto err_engine1;
}
- cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req;
- cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req;
-
ret = crypto_engine_start(cryp->engine);
if (ret) {
dev_err(dev, "Could not start crypto engine\n");
@@ -1117,10 +2002,16 @@ static int stm32_cryp_probe(struct platform_device *pdev)
goto err_algs;
}
+ ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ if (ret)
+ goto err_aead_algs;
+
dev_info(dev, "Initialized\n");
return 0;
+err_aead_algs:
+ crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
err_algs:
err_engine2:
crypto_engine_exit(cryp->engine);
@@ -1141,6 +2032,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
if (!cryp)
return -ENODEV;
+ crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
crypto_engine_exit(cryp->engine);
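The new "gcm(aes)" and "ccm(aes)" transforms are reached through the generic AEAD API, with the assoclen/cryptlen layout documented in the comment inside stm32_cryp_prepare_req(). For reference, a hedged sketch of a kernel-side caller (function name, buffer layout and sizes are assumptions; whether this driver or a software gcm(aes) gets picked depends on algorithm priority):

#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>

static int example_gcm_encrypt(const u8 *key, unsigned int keylen, u8 *buf,
			       unsigned int assoclen, unsigned int ptlen,
			       u8 iv[12])
{
	/* buf in:  assoc || plaintext   (assoclen + ptlen bytes)
	 * buf out: assoc || ciphertext || 16-byte tag */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}

	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	/* Wait synchronously for the (possibly asynchronous) HW result. */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
free_tfm:
	crypto_free_aead(tfm);
	return err;
}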
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4ca4a264a833..981e45692695 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -122,6 +122,7 @@ enum stm32_hash_data_format {
#define HASH_DMA_THRESHOLD 50
struct stm32_hash_ctx {
+ struct crypto_engine_ctx enginectx;
struct stm32_hash_dev *hdev;
unsigned long flags;
@@ -626,7 +627,7 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
writesl(hdev->io_base + HASH_DIN, buffer,
DIV_ROUND_UP(ncp, sizeof(u32)));
}
- stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
+ stm32_hash_set_nblw(hdev, ncp);
reg = stm32_hash_read(hdev, HASH_STR);
reg |= HASH_STR_DCAL;
stm32_hash_write(hdev, HASH_STR, reg);
@@ -743,13 +744,15 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
struct ahash_request *req = hdev->req;
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
int err;
+ int buflen = rctx->bufcnt;
+
+ rctx->bufcnt = 0;
if (!(rctx->flags & HASH_FLAGS_CPU))
err = stm32_hash_dma_send(hdev);
else
- err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1);
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
- rctx->bufcnt = 0;
return err;
}
@@ -828,15 +831,19 @@ static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
return 0;
}
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
+static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
+
static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
struct ahash_request *req)
{
return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}
-static int stm32_hash_prepare_req(struct crypto_engine *engine,
- struct ahash_request *req)
+static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
struct stm32_hash_request_ctx *rctx;
@@ -854,9 +861,10 @@ static int stm32_hash_prepare_req(struct crypto_engine *engine,
return stm32_hash_hw_init(hdev, rctx);
}
-static int stm32_hash_one_request(struct crypto_engine *engine,
- struct ahash_request *req)
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
struct stm32_hash_request_ctx *rctx;
@@ -1033,6 +1041,9 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
if (algs_hmac_name)
ctx->flags |= HASH_FLAGS_HMAC;
+ ctx->enginectx.op.do_one_request = stm32_hash_one_request;
+ ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
+ ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
@@ -1096,6 +1107,8 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
reg &= ~HASH_SR_OUTPUT_READY;
stm32_hash_write(hdev, HASH_SR, reg);
hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+ /* Disable IT */
+ stm32_hash_write(hdev, HASH_IMR, 0);
return IRQ_WAKE_THREAD;
}
@@ -1404,18 +1417,19 @@ MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
struct device *dev)
{
- int err;
-
hdev->pdata = of_device_get_match_data(dev);
if (!hdev->pdata) {
dev_err(dev, "no compatible OF match\n");
return -EINVAL;
}
- err = of_property_read_u32(dev->of_node, "dma-maxburst",
- &hdev->dma_maxburst);
+ if (of_property_read_u32(dev->of_node, "dma-maxburst",
+ &hdev->dma_maxburst)) {
+ dev_info(dev, "dma-maxburst not specified, using 0\n");
+ hdev->dma_maxburst = 0;
+ }
- return err;
+ return 0;
}
static int stm32_hash_probe(struct platform_device *pdev)
@@ -1493,9 +1507,6 @@ static int stm32_hash_probe(struct platform_device *pdev)
goto err_engine;
}
- hdev->engine->prepare_hash_request = stm32_hash_prepare_req;
- hdev->engine->hash_one_request = stm32_hash_one_request;
-
ret = crypto_engine_start(hdev->engine);
if (ret)
goto err_engine_start;
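With the hunk above, a missing "dma-maxburst" property is no longer fatal; the probe falls back to a burst size of 0. The generic shape of an optional u32 device-tree property with a default looks like this (names illustrative):

	u32 maxburst = 0;	/* default used when the property is absent */

	if (of_property_read_u32(dev->of_node, "dma-maxburst", &maxburst))
		dev_info(dev, "dma-maxburst not specified, using %u\n", maxburst);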
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 1547cbe13dc2..a81d89b3b7d8 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -451,6 +451,7 @@ static struct platform_driver sun4i_ss_driver = {
module_platform_driver(sun4i_ss_driver);
+MODULE_ALIAS("platform:sun4i-ss");
MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6882fa2f8bad..7cebf0a6ffbc 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -104,16 +104,34 @@ static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
/*
* map virtual single (contiguous) pointer to h/w descriptor pointer
*/
+static void __map_single_talitos_ptr(struct device *dev,
+ struct talitos_ptr *ptr,
+ unsigned int len, void *data,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+
+ to_talitos_ptr(ptr, dma_addr, len, is_sec1);
+}
+
static void map_single_talitos_ptr(struct device *dev,
struct talitos_ptr *ptr,
unsigned int len, void *data,
enum dma_data_direction dir)
{
- dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
+ __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
+}
- to_talitos_ptr(ptr, dma_addr, len, is_sec1);
+static void map_single_talitos_ptr_nosync(struct device *dev,
+ struct talitos_ptr *ptr,
+ unsigned int len, void *data,
+ enum dma_data_direction dir)
+{
+ __map_single_talitos_ptr(dev, ptr, len, data, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
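__map_single_talitos_ptr() above threads a DMA attributes argument through so the _nosync variant can pass DMA_ATTR_SKIP_CPU_SYNC. A stand-alone sketch of that idea; my_map_single() is an illustrative name, not a talitos symbol.

#include <linux/dma-mapping.h>

/* Map a buffer for DMA, optionally skipping CPU cache maintenance when the
 * caller has already made the memory coherent by other means. */
static dma_addr_t my_map_single(struct device *dev, void *buf, size_t len,
				enum dma_data_direction dir, bool skip_sync)
{
	unsigned long attrs = skip_sync ? DMA_ATTR_SKIP_CPU_SYNC : 0;
	dma_addr_t dma = dma_map_single_attrs(dev, buf, len, dir, attrs);

	/* Callers should still check dma_mapping_error(dev, dma). */
	return dma;
}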
/*
@@ -832,8 +850,6 @@ struct talitos_ctx {
unsigned int keylen;
unsigned int enckeylen;
unsigned int authkeylen;
- dma_addr_t dma_buf;
- dma_addr_t dma_hw_context;
};
#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
@@ -888,10 +904,12 @@ static int aead_setkey(struct crypto_aead *authenc,
ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
DMA_TO_DEVICE);
+ memzero_explicit(&keys, sizeof(keys));
return 0;
badkey:
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
return -EINVAL;
}
@@ -1130,10 +1148,10 @@ next:
return count;
}
-static int talitos_sg_map(struct device *dev, struct scatterlist *src,
- unsigned int len, struct talitos_edesc *edesc,
- struct talitos_ptr *ptr,
- int sg_count, unsigned int offset, int tbl_off)
+static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off, int elen)
{
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1142,6 +1160,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
to_talitos_ptr(ptr, 0, 0, is_sec1);
return 1;
}
+ to_talitos_ptr_ext_set(ptr, elen, is_sec1);
if (sg_count == 1) {
to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
return sg_count;
@@ -1150,7 +1169,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
return sg_count;
}
- sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
+ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
&edesc->link_tbl[tbl_off]);
if (sg_count == 1) {
/* Only one segment now, so no link tbl needed */
@@ -1164,6 +1183,15 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
return sg_count;
}
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off)
+{
+ return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+ tbl_off, 0);
+}
+
/*
* fill in and submit ipsec_esp descriptor
*/
@@ -1181,7 +1209,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
unsigned int ivsize = crypto_aead_ivsize(aead);
int tbl_off = 0;
int sg_count, ret;
- int sg_link_tbl_len;
+ int elen = 0;
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1223,17 +1251,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
* extent is bytes of HMAC postpended to ciphertext,
* typically 12 for ipsec
*/
- sg_link_tbl_len = cryptlen;
-
- if (is_ipsec_esp) {
- to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
+ if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
+ elen = authsize;
- if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)
- sg_link_tbl_len += authsize;
- }
-
- ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
- &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
+ ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+ sg_count, areq->assoclen, tbl_off, elen);
if (ret > 1) {
tbl_off += ret;
@@ -1404,7 +1426,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
- dev_err(dev, "could not allocate edescriptor\n");
err = ERR_PTR(-ENOMEM);
goto error_sg;
}
@@ -1690,9 +1711,30 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+ struct talitos_desc *desc = &edesc->desc;
+ struct talitos_desc *desc2 = desc + 1;
+
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+ if (desc->next_desc &&
+ desc->ptr[5].ptr != desc2->ptr[5].ptr)
+ unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+ /* When using hashctx-in, must unmap it. */
+ if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+ DMA_TO_DEVICE);
+ else if (desc->next_desc)
+ unmap_single_talitos_ptr(dev, &desc2->ptr[1],
+ DMA_TO_DEVICE);
+
+ if (is_sec1 && req_ctx->nbuf)
+ unmap_single_talitos_ptr(dev, &desc->ptr[3],
+ DMA_TO_DEVICE);
+
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
@@ -1766,8 +1808,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
/* hash context in */
if (!req_ctx->first || req_ctx->swinit) {
- to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context,
- req_ctx->hw_context_size, is_sec1);
+ map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context,
+ DMA_TO_DEVICE);
req_ctx->swinit = 0;
}
/* Indicate next op is not the first. */
@@ -1793,10 +1837,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
* data in
*/
if (is_sec1 && req_ctx->nbuf) {
- dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
- HASH_MAX_BLOCK_SIZE;
-
- to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1);
+ map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
+ req_ctx->buf[req_ctx->buf_idx],
+ DMA_TO_DEVICE);
} else {
sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
&desc->ptr[3], sg_count, offset, 0);
@@ -1812,8 +1855,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
crypto_ahash_digestsize(tfm),
areq->result, DMA_FROM_DEVICE);
else
- to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
- req_ctx->hw_context_size, is_sec1);
+ map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context,
+ DMA_FROM_DEVICE);
/* last DWORD empty */
@@ -1832,9 +1877,14 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
- to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context,
- req_ctx->hw_context_size, is_sec1);
-
+ if (desc->ptr[1].ptr)
+ copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
+ is_sec1);
+ else
+ map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context,
+ DMA_TO_DEVICE);
copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
&desc2->ptr[3], sg_count, offset, 0);
@@ -1842,8 +1892,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
sync_needed = true;
copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
if (req_ctx->last)
- to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
- req_ctx->hw_context_size, is_sec1);
+ map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context,
+ DMA_FROM_DEVICE);
next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
DMA_BIDIRECTIONAL);
@@ -1885,8 +1937,7 @@ static int ahash_init(struct ahash_request *areq)
struct device *dev = ctx->dev;
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
unsigned int size;
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
+ dma_addr_t dma;
/* Initialize the context */
req_ctx->buf_idx = 0;
@@ -1898,18 +1949,10 @@ static int ahash_init(struct ahash_request *areq)
: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
req_ctx->hw_context_size = size;
- if (ctx->dma_hw_context)
- dma_unmap_single(dev, ctx->dma_hw_context, size,
- DMA_BIDIRECTIONAL);
- ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
- DMA_BIDIRECTIONAL);
- if (ctx->dma_buf)
- dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
- DMA_TO_DEVICE);
- if (is_sec1)
- ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
- sizeof(req_ctx->buf),
- DMA_TO_DEVICE);
+ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
+
return 0;
}
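The ahash_init() hunk above replaces the long-lived per-tfm mapping with a map-and-immediately-unmap of the request's hw_context. On non-coherent systems this idiom appears to serve as a one-off write-back of the freshly initialised context to memory without leaving any mapping behind, which is what the later _nosync mappings depend on. A hedged sketch of the idiom with an illustrative name:

#include <linux/dma-mapping.h>

/* Illustrative helper: push a CPU-written buffer out to memory by mapping it
 * TO_DEVICE and unmapping it again, leaving no mapping behind. */
static void my_flush_for_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (!dma_mapping_error(dev, dma))
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}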
@@ -1920,12 +1963,6 @@ static int ahash_init(struct ahash_request *areq)
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = ctx->dev;
-
- ahash_init(areq);
- req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
req_ctx->hw_context[0] = SHA224_H0;
req_ctx->hw_context[1] = SHA224_H1;
@@ -1940,8 +1977,8 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq)
req_ctx->hw_context[8] = 0;
req_ctx->hw_context[9] = 0;
- dma_sync_single_for_device(dev, ctx->dma_hw_context,
- req_ctx->hw_context_size, DMA_TO_DEVICE);
+ ahash_init(areq);
+ req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
return 0;
}
@@ -2046,13 +2083,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
/* request SEC to INIT hash. */
if (req_ctx->first && !req_ctx->swinit)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
- if (is_sec1) {
- dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
- HASH_MAX_BLOCK_SIZE;
-
- dma_sync_single_for_device(dev, dma_buf,
- req_ctx->nbuf, DMA_TO_DEVICE);
- }
/* When the tfm context has a keylen, it's an HMAC.
* A first or last (ie. not middle) descriptor must request HMAC.
@@ -2106,12 +2136,15 @@ static int ahash_export(struct ahash_request *areq, void *out)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct talitos_export_state *export = out;
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = ctx->dev;
+ dma_addr_t dma;
+
+ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
- dma_sync_single_for_cpu(dev, ctx->dma_hw_context,
- req_ctx->hw_context_size, DMA_FROM_DEVICE);
memcpy(export->hw_context, req_ctx->hw_context,
req_ctx->hw_context_size);
memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
@@ -2128,39 +2161,29 @@ static int ahash_import(struct ahash_request *areq, const void *in)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- const struct talitos_export_state *export = in;
- unsigned int size;
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = ctx->dev;
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
+ const struct talitos_export_state *export = in;
+ unsigned int size;
+ dma_addr_t dma;
memset(req_ctx, 0, sizeof(*req_ctx));
size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
req_ctx->hw_context_size = size;
- if (ctx->dma_hw_context)
- dma_unmap_single(dev, ctx->dma_hw_context, size,
- DMA_BIDIRECTIONAL);
-
memcpy(req_ctx->hw_context, export->hw_context, size);
- ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
- DMA_BIDIRECTIONAL);
- if (ctx->dma_buf)
- dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
- DMA_TO_DEVICE);
memcpy(req_ctx->buf[0], export->buf, export->nbuf);
- if (is_sec1)
- ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
- sizeof(req_ctx->buf),
- DMA_TO_DEVICE);
req_ctx->swinit = export->swinit;
req_ctx->first = export->first;
req_ctx->last = export->last;
req_ctx->to_hash_later = export->to_hash_later;
req_ctx->nbuf = export->nbuf;
+ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
+
return 0;
}
@@ -3064,27 +3087,6 @@ static void talitos_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
}
-static void talitos_cra_exit_ahash(struct crypto_tfm *tfm)
-{
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- unsigned int size;
-
- talitos_cra_exit(tfm);
-
- size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <=
- SHA256_DIGEST_SIZE)
- ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
- : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
-
- if (ctx->dma_hw_context)
- dma_unmap_single(dev, ctx->dma_hw_context, size,
- DMA_BIDIRECTIONAL);
- if (ctx->dma_buf)
- dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2,
- DMA_TO_DEVICE);
-}
-
/*
* given the alg's descriptor header template, determine whether descriptor
* type and primary/secondary execution units required match the hw
@@ -3183,7 +3185,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
- alg->cra_exit = talitos_cra_exit_ahash;
+ alg->cra_exit = talitos_cra_exit;
alg->cra_type = &crypto_ahash_type;
t_alg->algt.alg.hash.init = ahash_init;
t_alg->algt.alg.hash.update = ahash_update;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 765f53e548ab..cb31b59c9d53 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1404,9 +1404,8 @@ static void cryp_algs_unregister_all(void)
static int ux500_cryp_probe(struct platform_device *pdev)
{
int ret;
- int cryp_error = 0;
- struct resource *res = NULL;
- struct resource *res_irq = NULL;
+ struct resource *res;
+ struct resource *res_irq;
struct cryp_device_data *device_data;
struct cryp_protection_config prot = {
.privilege_access = CRYP_STATE_ENABLE
@@ -1416,7 +1415,6 @@ static int ux500_cryp_probe(struct platform_device *pdev)
dev_dbg(dev, "[%s]", __func__);
device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
if (!device_data) {
- dev_err(dev, "[%s]: kzalloc() failed!", __func__);
ret = -ENOMEM;
goto out;
}
@@ -1479,15 +1477,13 @@ static int ux500_cryp_probe(struct platform_device *pdev)
goto out_clk_unprepare;
}
- cryp_error = cryp_check(device_data);
- if (cryp_error != 0) {
- dev_err(dev, "[%s]: cryp_init() failed!", __func__);
+ if (cryp_check(device_data)) {
+ dev_err(dev, "[%s]: cryp_check() failed!", __func__);
ret = -EINVAL;
goto out_power;
}
- cryp_error = cryp_configure_protection(device_data, &prot);
- if (cryp_error != 0) {
+ if (cryp_configure_protection(device_data, &prot)) {
dev_err(dev, "[%s]: cryp_configure_protection() failed!",
__func__);
ret = -EINVAL;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 9acccad26928..2d0a677bcc76 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1403,6 +1403,16 @@ out:
return ret1 ? ret1 : ret2;
}
+static int ahash_noimport(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
+static int ahash_noexport(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
static int hmac_sha1_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -1507,6 +1517,8 @@ static struct hash_algo_template hash_algs[] = {
.update = ahash_update,
.final = ahash_final,
.digest = ahash_sha1_digest,
+ .export = ahash_noexport,
+ .import = ahash_noimport,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
@@ -1529,6 +1541,8 @@ static struct hash_algo_template hash_algs[] = {
.update = ahash_update,
.final = ahash_final,
.digest = ahash_sha256_digest,
+ .export = ahash_noexport,
+ .import = ahash_noimport,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
@@ -1553,6 +1567,8 @@ static struct hash_algo_template hash_algs[] = {
.final = ahash_final,
.digest = hmac_sha1_digest,
.setkey = hmac_sha1_setkey,
+ .export = ahash_noexport,
+ .import = ahash_noimport,
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
@@ -1577,6 +1593,8 @@ static struct hash_algo_template hash_algs[] = {
.final = ahash_final,
.digest = hmac_sha256_digest,
.setkey = hmac_sha256_setkey,
+ .export = ahash_noexport,
+ .import = ahash_noimport,
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.statesize = sizeof(struct hash_ctx),
.halg.base = {
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
index 5db07495ddc5..a4324b1383a4 100644
--- a/drivers/crypto/virtio/Kconfig
+++ b/drivers/crypto/virtio/Kconfig
@@ -2,7 +2,6 @@ config CRYPTO_DEV_VIRTIO
tristate "VirtIO crypto driver"
depends on VIRTIO
select CRYPTO_AEAD
- select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
select CRYPTO_ENGINE
default m
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index abe8c15450df..ba190cfa7aa1 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -29,6 +29,7 @@
struct virtio_crypto_ablkcipher_ctx {
+ struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
struct crypto_tfm *tfm;
@@ -491,7 +492,7 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
vc_sym_req->ablkcipher_req = req;
vc_sym_req->encrypt = true;
- return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
+ return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
@@ -511,7 +512,7 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
vc_sym_req->ablkcipher_req = req;
vc_sym_req->encrypt = false;
- return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
+ return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
@@ -521,6 +522,9 @@ static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
ctx->tfm = tfm;
+ ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
@@ -538,9 +542,9 @@ static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
}
int virtio_crypto_ablkcipher_crypt_req(
- struct crypto_engine *engine,
- struct ablkcipher_request *req)
+ struct crypto_engine *engine, void *vreq)
{
+ struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
struct virtio_crypto_sym_request *vc_sym_req =
ablkcipher_request_ctx(req);
struct virtio_crypto_request *vc_req = &vc_sym_req->base;
@@ -561,8 +565,8 @@ static void virtio_crypto_ablkcipher_finalize_req(
struct ablkcipher_request *req,
int err)
{
- crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine,
- req, err);
+ crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
+ req, err);
kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
}
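The virtio hunks above follow the crypto_engine rename from the generic cipher helpers to the ablkcipher-specific ones: a request is queued with the transfer helper and handed back with the finalize helper. A minimal sketch with hypothetical my_* wrappers, not virtio driver functions:

#include <linux/crypto.h>
#include <crypto/engine.h>

/* Queue an ablkcipher request on the engine (submission path). */
static int my_queue_req(struct crypto_engine *engine,
			struct ablkcipher_request *req)
{
	return crypto_transfer_ablkcipher_request_to_engine(engine, req);
}

/* Report the result once the hardware is done (completion path). */
static void my_complete_req(struct crypto_engine *engine,
			    struct ablkcipher_request *req, int err)
{
	crypto_finalize_ablkcipher_request(engine, req, err);
}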
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index e976539a05d9..66501a5a2b7b 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
-#include <crypto/authenc.h>
#include <crypto/engine.h>
@@ -107,8 +106,7 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
- struct crypto_engine *engine,
- struct ablkcipher_request *req);
+ struct crypto_engine *engine, void *vreq);
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index ff1410a32c2b..83326986c113 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -111,9 +111,6 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
ret = -ENOMEM;
goto err_engine;
}
-
- vi->data_vq[i].engine->cipher_one_request =
- virtio_crypto_ablkcipher_crypt_req;
}
kfree(names);
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index c94dfe8adb63..168191fa0357 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-config CRYPTO_DEV_CCREE
+config CRYPTO_DEV_CCREE_OLD
tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
- depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
+ depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA && BROKEN
default n
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
diff --git a/drivers/staging/ccree/Makefile b/drivers/staging/ccree/Makefile
index bdc27970f95f..553db5c45354 100644
--- a/drivers/staging/ccree/Makefile
+++ b/drivers/staging/ccree/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
+obj-$(CONFIG_CRYPTO_DEV_CCREE_OLD) := ccree.o
ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o