author	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-02 14:53:12 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-02 14:53:12 -0700
commit	797994f81a8b2bdca2eecffa415c1e7a89a4f961 (patch)
tree	1383dc469c26ad37fdf960f682d9a48c782935c5 /drivers/crypto
parent	c8d8566952fda026966784a62f324c8352f77430 (diff)
parent	3862de1f6c442d53bd828d39f86d07d933a70605 (diff)
download	linux-3.10-797994f81a8b2bdca2eecffa415c1e7a89a4f961.tar.gz
	linux-3.10-797994f81a8b2bdca2eecffa415c1e7a89a4f961.tar.bz2
	linux-3.10-797994f81a8b2bdca2eecffa415c1e7a89a4f961.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 - XTS mode optimisation for twofish/cast6/camellia/aes on x86
 - AVX2/x86_64 implementation for blowfish/twofish/serpent/camellia
 - SSSE3/AVX/AVX2 optimisations for sha256/sha512
 - Added driver for SAHARA2 crypto accelerator
 - Fix for GMAC when used in non-IPsec scenarios
 - Added generic CMAC implementation (including IPsec glue)
 - IP update for crypto/atmel
 - Support for more than one device in hwrng/timeriomem
 - Added Broadcom BCM2835 RNG driver
 - Misc fixes

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (59 commits)
  crypto: caam - fix job ring cleanup code
  crypto: camellia - add AVX2/AES-NI/x86_64 assembler implementation of camellia cipher
  crypto: serpent - add AVX2/x86_64 assembler implementation of serpent cipher
  crypto: twofish - add AVX2/x86_64 assembler implementation of twofish cipher
  crypto: blowfish - add AVX2/x86_64 implementation of blowfish cipher
  crypto: tcrypt - add async cipher speed tests for blowfish
  crypto: testmgr - extend camellia test-vectors for camellia-aesni/avx2
  crypto: aesni_intel - fix Kconfig problem with CRYPTO_GLUE_HELPER_X86
  crypto: aesni_intel - add more optimized XTS mode for x86-64
  crypto: x86/camellia-aesni-avx - add more optimized XTS code
  crypto: cast6-avx: use new optimized XTS code
  crypto: x86/twofish-avx - use optimized XTS code
  crypto: x86 - add more optimized XTS-mode for serpent-avx
  xfrm: add rfc4494 AES-CMAC-96 support
  crypto: add CMAC support to CryptoAPI
  crypto: testmgr - add empty test vectors for null ciphers
  crypto: testmgr - add AES GMAC test vectors
  crypto: gcm - fix rfc4543 to handle async crypto correctly
  crypto: gcm - make GMAC work when dst and src are different
  hwrng: timeriomem - added devicetree hooks
  ...
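Note (added for this write-up, not part of the pull): the "crypto: add CMAC support to CryptoAPI" commit above registers a generic "cmac(aes)" template. The sketch below shows how such a transform could be driven through the synchronous hash (shash) API once the pull is merged; the helper name cmac_aes_demo and its parameters are illustrative and do not come from the patch.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative only: compute a 16 byte AES-CMAC over msg using the
 * generic "cmac(aes)" template added by this series. */
static int cmac_aes_demo(const u8 *key, unsigned int keylen,
			 const u8 *msg, unsigned int msglen, u8 *mac)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	/* shash_desc must be followed by descsize bytes of request context */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;

	/* one-shot init + update + final over msg; MAC written to mac[16] */
	err = crypto_shash_digest(desc, msg, msglen, mac);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}

The companion "xfrm: add rfc4494 AES-CMAC-96 support" commit wires the same algorithm into IPsec; the sketch here only exercises the raw hash interface.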
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/Kconfig	18
-rw-r--r--	drivers/crypto/Makefile	1
-rw-r--r--	drivers/crypto/atmel-aes.c	471
-rw-r--r--	drivers/crypto/atmel-sha-regs.h	7
-rw-r--r--	drivers/crypto/atmel-sha.c	586
-rw-r--r--	drivers/crypto/atmel-tdes-regs.h	2
-rw-r--r--	drivers/crypto/atmel-tdes.c	394
-rw-r--r--	drivers/crypto/bfin_crc.c	6
-rw-r--r--	drivers/crypto/caam/Kconfig	2
-rw-r--r--	drivers/crypto/caam/caamalg.c	6
-rw-r--r--	drivers/crypto/caam/caamhash.c	4
-rw-r--r--	drivers/crypto/caam/ctrl.c	3
-rw-r--r--	drivers/crypto/caam/error.c	10
-rw-r--r--	drivers/crypto/caam/intern.h	1
-rw-r--r--	drivers/crypto/caam/jr.c	4
-rw-r--r--	drivers/crypto/caam/key_gen.c	2
-rw-r--r--	drivers/crypto/caam/key_gen.h	2
-rw-r--r--	drivers/crypto/caam/regs.h	4
-rw-r--r--	drivers/crypto/omap-aes.c	15
-rw-r--r--	drivers/crypto/omap-sham.c	15
-rw-r--r--	drivers/crypto/picoxcell_crypto.c	4
-rw-r--r--	drivers/crypto/sahara.c	1070
-rw-r--r--	drivers/crypto/ux500/hash/hash_core.c	6
23 files changed, 2312 insertions, 321 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 87ec4d027c2..dffb8552536 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -276,6 +276,16 @@ config CRYPTO_DEV_PICOXCELL
Saying m here will build a module named pipcoxcell_crypto.
+config CRYPTO_DEV_SAHARA
+ tristate "Support for SAHARA crypto accelerator"
+ depends on ARCH_MXC && EXPERIMENTAL && OF
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES
+ select CRYPTO_ECB
+ help
+ This option enables support for the SAHARA HW crypto accelerator
+ found in some Freescale i.MX chips.
+
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210 crypto accelerator"
depends on ARCH_S5PV210
@@ -361,15 +371,17 @@ config CRYPTO_DEV_ATMEL_TDES
will be called atmel-tdes.
config CRYPTO_DEV_ATMEL_SHA
- tristate "Support for Atmel SHA1/SHA256 hw accelerator"
+ tristate "Support for Atmel SHA hw accelerator"
depends on ARCH_AT91
select CRYPTO_SHA1
select CRYPTO_SHA256
+ select CRYPTO_SHA512
select CRYPTO_ALGAPI
help
- Some Atmel processors have SHA1/SHA256 hw accelerator.
+ Some Atmel processors have SHA1/SHA224/SHA256/SHA384/SHA512
+ hw accelerator.
Select this if you want to use the Atmel module for
- SHA1/SHA256 algorithms.
+ SHA1/SHA224/SHA256/SHA384/SHA512 algorithms.
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 880a47b0b02..38ce13d3b79 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 6f22ba51f96..c1efd910d97 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -38,7 +38,7 @@
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
-#include <linux/platform_data/atmel-aes.h>
+#include <linux/platform_data/crypto-atmel.h>
#include "atmel-aes-regs.h"
#define CFB8_BLOCK_SIZE 1
@@ -47,7 +47,7 @@
#define CFB64_BLOCK_SIZE 8
/* AES flags */
-#define AES_FLAGS_MODE_MASK 0x01ff
+#define AES_FLAGS_MODE_MASK 0x03ff
#define AES_FLAGS_ENCRYPT BIT(0)
#define AES_FLAGS_CBC BIT(1)
#define AES_FLAGS_CFB BIT(2)
@@ -55,21 +55,26 @@
#define AES_FLAGS_CFB16 BIT(4)
#define AES_FLAGS_CFB32 BIT(5)
#define AES_FLAGS_CFB64 BIT(6)
-#define AES_FLAGS_OFB BIT(7)
-#define AES_FLAGS_CTR BIT(8)
+#define AES_FLAGS_CFB128 BIT(7)
+#define AES_FLAGS_OFB BIT(8)
+#define AES_FLAGS_CTR BIT(9)
#define AES_FLAGS_INIT BIT(16)
#define AES_FLAGS_DMA BIT(17)
#define AES_FLAGS_BUSY BIT(18)
+#define AES_FLAGS_FAST BIT(19)
-#define AES_FLAGS_DUALBUFF BIT(24)
-
-#define ATMEL_AES_QUEUE_LENGTH 1
-#define ATMEL_AES_CACHE_SIZE 0
+#define ATMEL_AES_QUEUE_LENGTH 50
#define ATMEL_AES_DMA_THRESHOLD 16
+struct atmel_aes_caps {
+ bool has_dualbuff;
+ bool has_cfb64;
+ u32 max_burst_size;
+};
+
struct atmel_aes_dev;
struct atmel_aes_ctx {
@@ -77,6 +82,8 @@ struct atmel_aes_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+
+ u16 block_size;
};
struct atmel_aes_reqctx {
@@ -112,20 +119,27 @@ struct atmel_aes_dev {
struct scatterlist *in_sg;
unsigned int nb_in_sg;
-
+ size_t in_offset;
struct scatterlist *out_sg;
unsigned int nb_out_sg;
+ size_t out_offset;
size_t bufcnt;
+ size_t buflen;
+ size_t dma_size;
- u8 buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
- int dma_in;
+ void *buf_in;
+ int dma_in;
+ dma_addr_t dma_addr_in;
struct atmel_aes_dma dma_lch_in;
- u8 buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
- int dma_out;
+ void *buf_out;
+ int dma_out;
+ dma_addr_t dma_addr_out;
struct atmel_aes_dma dma_lch_out;
+ struct atmel_aes_caps caps;
+
u32 hw_version;
};
@@ -165,6 +179,37 @@ static int atmel_aes_sg_length(struct ablkcipher_request *req,
return sg_nb;
}
+static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
+ void *buf, size_t buflen, size_t total, int out)
+{
+ unsigned int count, off = 0;
+
+ while (buflen && total) {
+ count = min((*sg)->length - *offset, total);
+ count = min(count, buflen);
+
+ if (!count)
+ return off;
+
+ scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
+
+ off += count;
+ buflen -= count;
+ *offset += count;
+ total -= count;
+
+ if (*offset == (*sg)->length) {
+ *sg = sg_next(*sg);
+ if (*sg)
+ *offset = 0;
+ else
+ total = 0;
+ }
+ }
+
+ return off;
+}
+
static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
return readl_relaxed(dd->io_base + offset);
@@ -190,14 +235,6 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
atmel_aes_write(dd, offset, *value);
}
-static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
-{
- atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);
-
- if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
- dd->flags |= AES_FLAGS_DUALBUFF;
-}
-
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
struct atmel_aes_dev *aes_dd = NULL;
@@ -225,7 +262,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
if (!(dd->flags & AES_FLAGS_INIT)) {
atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
- atmel_aes_dualbuff_test(dd);
+ atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
dd->flags |= AES_FLAGS_INIT;
dd->err = 0;
}
@@ -233,11 +270,19 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
return 0;
}
+static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
+{
+ return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
+}
+
static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
atmel_aes_hw_init(dd);
- dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);
+ dd->hw_version = atmel_aes_get_version(dd);
+
+ dev_info(dd->dev,
+ "version: 0x%x\n", dd->hw_version);
clk_disable_unprepare(dd->iclk);
}
@@ -260,50 +305,77 @@ static void atmel_aes_dma_callback(void *data)
tasklet_schedule(&dd->done_task);
}
-static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
+static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
+ dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
+ struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
- int nb_dma_sg_in, nb_dma_sg_out;
- dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
- if (!dd->nb_in_sg)
- goto exit_err;
+ dd->dma_size = length;
- nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
- DMA_TO_DEVICE);
- if (!nb_dma_sg_in)
- goto exit_err;
+ if (!(dd->flags & AES_FLAGS_FAST)) {
+ dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+ DMA_TO_DEVICE);
+ }
- in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
- nb_dma_sg_in, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (dd->flags & AES_FLAGS_CFB8) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else if (dd->flags & AES_FLAGS_CFB16) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
- if (!in_desc)
- goto unmap_in;
+ if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
+ AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.src_maxburst = 1;
+ dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ } else {
+ dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ }
- /* callback not needed */
+ dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+ dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
- dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
- if (!dd->nb_out_sg)
- goto unmap_in;
+ dd->flags |= AES_FLAGS_DMA;
- nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
- DMA_FROM_DEVICE);
- if (!nb_dma_sg_out)
- goto unmap_out;
+ sg_init_table(&sg[0], 1);
+ sg_dma_address(&sg[0]) = dma_addr_in;
+ sg_dma_len(&sg[0]) = length;
- out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
- nb_dma_sg_out, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ sg_init_table(&sg[1], 1);
+ sg_dma_address(&sg[1]) = dma_addr_out;
+ sg_dma_len(&sg[1]) = length;
+
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
+ 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc)
+ return -EINVAL;
+ out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!out_desc)
- goto unmap_out;
+ return -EINVAL;
out_desc->callback = atmel_aes_dma_callback;
out_desc->callback_param = dd;
- dd->total -= dd->req->nbytes;
-
dmaengine_submit(out_desc);
dma_async_issue_pending(dd->dma_lch_out.chan);
@@ -311,15 +383,6 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
dma_async_issue_pending(dd->dma_lch_in.chan);
return 0;
-
-unmap_out:
- dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
- DMA_FROM_DEVICE);
-unmap_in:
- dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
- DMA_TO_DEVICE);
-exit_err:
- return -EINVAL;
}
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
@@ -352,30 +415,66 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
- int err;
+ int err, fast = 0, in, out;
+ size_t count;
+ dma_addr_t addr_in, addr_out;
+
+ if ((!dd->in_offset) && (!dd->out_offset)) {
+ /* check for alignment */
+ in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
+ out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
+ fast = in && out;
+
+ if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
+ fast = 0;
+ }
+
+
+ if (fast) {
+ count = min(dd->total, sg_dma_len(dd->in_sg));
+ count = min(count, sg_dma_len(dd->out_sg));
+
+ err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
+
+ err = dma_map_sg(dd->dev, dd->out_sg, 1,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ dma_unmap_sg(dd->dev, dd->in_sg, 1,
+ DMA_TO_DEVICE);
+ return -EINVAL;
+ }
+
+ addr_in = sg_dma_address(dd->in_sg);
+ addr_out = sg_dma_address(dd->out_sg);
+
+ dd->flags |= AES_FLAGS_FAST;
- if (dd->flags & AES_FLAGS_CFB8) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_1_BYTE;
- } else if (dd->flags & AES_FLAGS_CFB16) {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES;
} else {
- dd->dma_lch_in.dma_conf.dst_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
- dd->dma_lch_out.dma_conf.src_addr_width =
- DMA_SLAVE_BUSWIDTH_4_BYTES;
+ /* use cache buffers */
+ count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
+ dd->buf_in, dd->buflen, dd->total, 0);
+
+ addr_in = dd->dma_addr_in;
+ addr_out = dd->dma_addr_out;
+
+ dd->flags &= ~AES_FLAGS_FAST;
}
- dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
- dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+ dd->total -= count;
- dd->flags |= AES_FLAGS_DMA;
- err = atmel_aes_crypt_dma(dd);
+ err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);
+
+ if (err && (dd->flags & AES_FLAGS_FAST)) {
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+ }
return err;
}
@@ -410,6 +509,8 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
valmr |= AES_MR_CFBS_32b;
else if (dd->flags & AES_FLAGS_CFB64)
valmr |= AES_MR_CFBS_64b;
+ else if (dd->flags & AES_FLAGS_CFB128)
+ valmr |= AES_MR_CFBS_128b;
} else if (dd->flags & AES_FLAGS_OFB) {
valmr |= AES_MR_OPMOD_OFB;
} else if (dd->flags & AES_FLAGS_CTR) {
@@ -423,7 +524,7 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
valmr |= AES_MR_SMOD_IDATAR0;
- if (dd->flags & AES_FLAGS_DUALBUFF)
+ if (dd->caps.has_dualbuff)
valmr |= AES_MR_DUALBUFF;
} else {
valmr |= AES_MR_SMOD_AUTO;
@@ -477,7 +578,9 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
/* assign new request to device */
dd->req = req;
dd->total = req->nbytes;
+ dd->in_offset = 0;
dd->in_sg = req->src;
+ dd->out_offset = 0;
dd->out_sg = req->dst;
rctx = ablkcipher_request_ctx(req);
@@ -506,18 +609,86 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
int err = -EINVAL;
+ size_t count;
if (dd->flags & AES_FLAGS_DMA) {
- dma_unmap_sg(dd->dev, dd->out_sg,
- dd->nb_out_sg, DMA_FROM_DEVICE);
- dma_unmap_sg(dd->dev, dd->in_sg,
- dd->nb_in_sg, DMA_TO_DEVICE);
err = 0;
+ if (dd->flags & AES_FLAGS_FAST) {
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ } else {
+ dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
+
+ /* copy data */
+ count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
+ dd->buf_out, dd->buflen, dd->dma_size, 1);
+ if (count != dd->dma_size) {
+ err = -EINVAL;
+ pr_err("not all data converted: %u\n", count);
+ }
+ }
}
return err;
}
+
+static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
+{
+ int err = -ENOMEM;
+
+ dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
+ dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
+ dd->buflen = PAGE_SIZE;
+ dd->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+ if (!dd->buf_in || !dd->buf_out) {
+ dev_err(dd->dev, "unable to alloc pages.\n");
+ goto err_alloc;
+ }
+
+ /* MAP here */
+ dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
+ dd->buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
+ dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+ err = -EINVAL;
+ goto err_map_in;
+ }
+
+ dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
+ dd->buflen, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
+ dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+ err = -EINVAL;
+ goto err_map_out;
+ }
+
+ return 0;
+
+err_map_out:
+ dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+ DMA_TO_DEVICE);
+err_map_in:
+ free_page((unsigned long)dd->buf_out);
+ free_page((unsigned long)dd->buf_in);
+err_alloc:
+ if (err)
+ pr_err("error: %d\n", err);
+ return err;
+}
+
+static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
+{
+ dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+ DMA_TO_DEVICE);
+ free_page((unsigned long)dd->buf_out);
+ free_page((unsigned long)dd->buf_in);
+}
+
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
@@ -525,9 +696,30 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct atmel_aes_dev *dd;
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of AES blocks\n");
- return -EINVAL;
+ if (mode & AES_FLAGS_CFB8) {
+ if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of CFB8 blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = CFB8_BLOCK_SIZE;
+ } else if (mode & AES_FLAGS_CFB16) {
+ if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of CFB16 blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = CFB16_BLOCK_SIZE;
+ } else if (mode & AES_FLAGS_CFB32) {
+ if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of CFB32 blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = CFB32_BLOCK_SIZE;
+ } else {
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of AES blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = AES_BLOCK_SIZE;
}
dd = atmel_aes_find_dev(ctx);
@@ -551,14 +743,12 @@ static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
}
}
-static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
+ struct crypto_platform_data *pdata)
{
int err = -ENOMEM;
- struct aes_platform_data *pdata;
dma_cap_mask_t mask_in, mask_out;
- pdata = dd->dev->platform_data;
-
if (pdata && pdata->dma_slave->txdata.dma_dev &&
pdata->dma_slave->rxdata.dma_dev) {
@@ -568,28 +758,38 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
dd->dma_lch_in.chan = dma_request_channel(mask_in,
atmel_aes_filter, &pdata->dma_slave->rxdata);
+
if (!dd->dma_lch_in.chan)
goto err_dma_in;
dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
AES_IDATAR(0);
- dd->dma_lch_in.dma_conf.src_maxburst = 1;
- dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_in.dma_conf.device_fc = false;
dma_cap_zero(mask_out);
dma_cap_set(DMA_SLAVE, mask_out);
dd->dma_lch_out.chan = dma_request_channel(mask_out,
atmel_aes_filter, &pdata->dma_slave->txdata);
+
if (!dd->dma_lch_out.chan)
goto err_dma_out;
dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
AES_ODATAR(0);
- dd->dma_lch_out.dma_conf.src_maxburst = 1;
- dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
+ dd->dma_lch_out.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_out.dma_conf.device_fc = false;
return 0;
@@ -665,13 +865,13 @@ static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
return atmel_aes_crypt(req,
- AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
+ AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}
static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
return atmel_aes_crypt(req,
- AES_FLAGS_CFB);
+ AES_FLAGS_CFB | AES_FLAGS_CFB128);
}
static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
@@ -753,7 +953,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -773,7 +973,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -794,7 +994,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -815,7 +1015,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -836,7 +1036,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0x3,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -857,7 +1057,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB16_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0x1,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -899,7 +1099,7 @@ static struct crypto_alg aes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0xf,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -915,15 +1115,14 @@ static struct crypto_alg aes_algs[] = {
},
};
-static struct crypto_alg aes_cfb64_alg[] = {
-{
+static struct crypto_alg aes_cfb64_alg = {
.cra_name = "cfb64(aes)",
.cra_driver_name = "atmel-cfb64-aes",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB64_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_aes_ctx),
- .cra_alignmask = 0x0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
@@ -936,7 +1135,6 @@ static struct crypto_alg aes_cfb64_alg[] = {
.encrypt = atmel_aes_cfb64_encrypt,
.decrypt = atmel_aes_cfb64_decrypt,
}
-},
};
static void atmel_aes_queue_task(unsigned long data)
@@ -969,7 +1167,14 @@ static void atmel_aes_done_task(unsigned long data)
err = dd->err ? : err;
if (dd->total && !err) {
- err = atmel_aes_crypt_dma_start(dd);
+ if (dd->flags & AES_FLAGS_FAST) {
+ dd->in_sg = sg_next(dd->in_sg);
+ dd->out_sg = sg_next(dd->out_sg);
+ if (!dd->in_sg || !dd->out_sg)
+ err = -EINVAL;
+ }
+ if (!err)
+ err = atmel_aes_crypt_dma_start(dd);
if (!err)
return; /* DMA started. Not fininishing. */
}
@@ -1003,8 +1208,8 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_alg(&aes_algs[i]);
- if (dd->hw_version >= 0x130)
- crypto_unregister_alg(&aes_cfb64_alg[0]);
+ if (dd->caps.has_cfb64)
+ crypto_unregister_alg(&aes_cfb64_alg);
}
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
@@ -1017,10 +1222,8 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_algs;
}
- atmel_aes_hw_version_init(dd);
-
- if (dd->hw_version >= 0x130) {
- err = crypto_register_alg(&aes_cfb64_alg[0]);
+ if (dd->caps.has_cfb64) {
+ err = crypto_register_alg(&aes_cfb64_alg);
if (err)
goto err_aes_cfb64_alg;
}
@@ -1036,10 +1239,32 @@ err_aes_algs:
return err;
}
+static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
+{
+ dd->caps.has_dualbuff = 0;
+ dd->caps.has_cfb64 = 0;
+ dd->caps.max_burst_size = 1;
+
+ /* keep only major version number */
+ switch (dd->hw_version & 0xff0) {
+ case 0x130:
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_cfb64 = 1;
+ dd->caps.max_burst_size = 4;
+ break;
+ case 0x120:
+ break;
+ default:
+ dev_warn(dd->dev,
+ "Unmanaged aes version, set minimum capabilities\n");
+ break;
+ }
+}
+
static int atmel_aes_probe(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
- struct aes_platform_data *pdata;
+ struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *aes_res;
unsigned long aes_phys_size;
@@ -1099,7 +1324,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- aes_dd->iclk = clk_get(&pdev->dev, NULL);
+ aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
if (IS_ERR(aes_dd->iclk)) {
dev_err(dev, "clock intialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
@@ -1113,7 +1338,15 @@ static int atmel_aes_probe(struct platform_device *pdev)
goto aes_io_err;
}
- err = atmel_aes_dma_init(aes_dd);
+ atmel_aes_hw_version_init(aes_dd);
+
+ atmel_aes_get_cap(aes_dd);
+
+ err = atmel_aes_buff_init(aes_dd);
+ if (err)
+ goto err_aes_buff;
+
+ err = atmel_aes_dma_init(aes_dd, pdata);
if (err)
goto err_aes_dma;
@@ -1135,6 +1368,8 @@ err_algs:
spin_unlock(&atmel_aes.lock);
atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
+ atmel_aes_buff_cleanup(aes_dd);
+err_aes_buff:
iounmap(aes_dd->io_base);
aes_io_err:
clk_put(aes_dd->iclk);
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
index dc53a20d7da..83b2d742566 100644
--- a/drivers/crypto/atmel-sha-regs.h
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -14,10 +14,13 @@
#define SHA_MR_MODE_MANUAL 0x0
#define SHA_MR_MODE_AUTO 0x1
#define SHA_MR_MODE_PDC 0x2
-#define SHA_MR_DUALBUFF (1 << 3)
#define SHA_MR_PROCDLY (1 << 4)
#define SHA_MR_ALGO_SHA1 (0 << 8)
#define SHA_MR_ALGO_SHA256 (1 << 8)
+#define SHA_MR_ALGO_SHA384 (2 << 8)
+#define SHA_MR_ALGO_SHA512 (3 << 8)
+#define SHA_MR_ALGO_SHA224 (4 << 8)
+#define SHA_MR_DUALBUFF (1 << 16)
#define SHA_IER 0x10
#define SHA_IDR 0x14
@@ -33,6 +36,8 @@
#define SHA_ISR_URAT_MR (0x2 << 12)
#define SHA_ISR_URAT_WO (0x5 << 12)
+#define SHA_HW_VERSION 0xFC
+
#define SHA_TPR 0x108
#define SHA_TCR 0x10C
#define SHA_TNPR 0x118
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 4918e9424d3..eaed8bf183b 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -38,6 +38,7 @@
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
/* SHA flags */
@@ -52,11 +53,12 @@
#define SHA_FLAGS_FINUP BIT(16)
#define SHA_FLAGS_SG BIT(17)
#define SHA_FLAGS_SHA1 BIT(18)
-#define SHA_FLAGS_SHA256 BIT(19)
-#define SHA_FLAGS_ERROR BIT(20)
-#define SHA_FLAGS_PAD BIT(21)
-
-#define SHA_FLAGS_DUALBUFF BIT(24)
+#define SHA_FLAGS_SHA224 BIT(19)
+#define SHA_FLAGS_SHA256 BIT(20)
+#define SHA_FLAGS_SHA384 BIT(21)
+#define SHA_FLAGS_SHA512 BIT(22)
+#define SHA_FLAGS_ERROR BIT(23)
+#define SHA_FLAGS_PAD BIT(24)
#define SHA_OP_UPDATE 1
#define SHA_OP_FINAL 2
@@ -65,6 +67,12 @@
#define ATMEL_SHA_DMA_THRESHOLD 56
+struct atmel_sha_caps {
+ bool has_dma;
+ bool has_dualbuff;
+ bool has_sha224;
+ bool has_sha_384_512;
+};
struct atmel_sha_dev;
@@ -73,8 +81,8 @@ struct atmel_sha_reqctx {
unsigned long flags;
unsigned long op;
- u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
- size_t digcnt;
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
+ u64 digcnt[2];
size_t bufcnt;
size_t buflen;
dma_addr_t dma_addr;
@@ -84,6 +92,8 @@ struct atmel_sha_reqctx {
unsigned int offset; /* offset in current sg */
unsigned int total; /* total request */
+ size_t block_size;
+
u8 buffer[0] __aligned(sizeof(u32));
};
@@ -97,7 +107,12 @@ struct atmel_sha_ctx {
};
-#define ATMEL_SHA_QUEUE_LENGTH 1
+#define ATMEL_SHA_QUEUE_LENGTH 50
+
+struct atmel_sha_dma {
+ struct dma_chan *chan;
+ struct dma_slave_config dma_conf;
+};
struct atmel_sha_dev {
struct list_head list;
@@ -114,6 +129,12 @@ struct atmel_sha_dev {
unsigned long flags;
struct crypto_queue queue;
struct ahash_request *req;
+
+ struct atmel_sha_dma dma_lch_in;
+
+ struct atmel_sha_caps caps;
+
+ u32 hw_version;
};
struct atmel_sha_drv {
@@ -137,14 +158,6 @@ static inline void atmel_sha_write(struct atmel_sha_dev *dd,
writel_relaxed(value, dd->io_base + offset);
}
-static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd)
-{
- atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF);
-
- if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF)
- dd->flags |= SHA_FLAGS_DUALBUFF;
-}
-
static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
size_t count;
@@ -176,31 +189,58 @@ static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
}
/*
- * The purpose of this padding is to ensure that the padded message
- * is a multiple of 512 bits. The bit "1" is appended at the end of
- * the message followed by "padlen-1" zero bits. Then a 64 bits block
- * equals to the message length in bits is appended.
+ * The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message followed by
+ * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
+ * 128 bits block (SHA384/SHA512) equals to the message length in bits
+ * is appended.
*
- * padlen is calculated as followed:
+ * For SHA1/SHA224/SHA256, padlen is calculated as followed:
* - if message length < 56 bytes then padlen = 56 - message length
* - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as followed:
+ * - if message length < 112 bytes then padlen = 112 - message length
+ * - else padlen = 128 + 112 - message length
*/
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
unsigned int index, padlen;
- u64 bits;
- u64 size;
-
- bits = (ctx->bufcnt + ctx->digcnt + length) << 3;
- size = cpu_to_be64(bits);
-
- index = ctx->bufcnt & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- *(ctx->buffer + ctx->bufcnt) = 0x80;
- memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
- memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8);
- ctx->bufcnt += padlen + 8;
- ctx->flags |= SHA_FLAGS_PAD;
+ u64 bits[2];
+ u64 size[2];
+
+ size[0] = ctx->digcnt[0];
+ size[1] = ctx->digcnt[1];
+
+ size[0] += ctx->bufcnt;
+ if (size[0] < ctx->bufcnt)
+ size[1]++;
+
+ size[0] += length;
+ if (size[0] < length)
+ size[1]++;
+
+ bits[1] = cpu_to_be64(size[0] << 3);
+ bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
+
+ if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
+ index = ctx->bufcnt & 0x7f;
+ padlen = (index < 112) ? (112 - index) : ((128+112) - index);
+ *(ctx->buffer + ctx->bufcnt) = 0x80;
+ memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+ memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
+ ctx->bufcnt += padlen + 16;
+ ctx->flags |= SHA_FLAGS_PAD;
+ } else {
+ index = ctx->bufcnt & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+ *(ctx->buffer + ctx->bufcnt) = 0x80;
+ memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+ memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
+ ctx->bufcnt += padlen + 8;
+ ctx->flags |= SHA_FLAGS_PAD;
+ }
}
static int atmel_sha_init(struct ahash_request *req)
@@ -231,13 +271,35 @@ static int atmel_sha_init(struct ahash_request *req)
dev_dbg(dd->dev, "init: digest size: %d\n",
crypto_ahash_digestsize(tfm));
- if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
ctx->flags |= SHA_FLAGS_SHA1;
- else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE)
+ ctx->block_size = SHA1_BLOCK_SIZE;
+ break;
+ case SHA224_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA224;
+ ctx->block_size = SHA224_BLOCK_SIZE;
+ break;
+ case SHA256_DIGEST_SIZE:
ctx->flags |= SHA_FLAGS_SHA256;
+ ctx->block_size = SHA256_BLOCK_SIZE;
+ break;
+ case SHA384_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA384;
+ ctx->block_size = SHA384_BLOCK_SIZE;
+ break;
+ case SHA512_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA512;
+ ctx->block_size = SHA512_BLOCK_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
ctx->bufcnt = 0;
- ctx->digcnt = 0;
+ ctx->digcnt[0] = 0;
+ ctx->digcnt[1] = 0;
ctx->buflen = SHA_BUFFER_LEN;
return 0;
@@ -249,19 +311,28 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
if (likely(dma)) {
- atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
+ if (!dd->caps.has_dma)
+ atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
valmr = SHA_MR_MODE_PDC;
- if (dd->flags & SHA_FLAGS_DUALBUFF)
- valmr = SHA_MR_DUALBUFF;
+ if (dd->caps.has_dualbuff)
+ valmr |= SHA_MR_DUALBUFF;
} else {
atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
- if (ctx->flags & SHA_FLAGS_SHA256)
+ if (ctx->flags & SHA_FLAGS_SHA1)
+ valmr |= SHA_MR_ALGO_SHA1;
+ else if (ctx->flags & SHA_FLAGS_SHA224)
+ valmr |= SHA_MR_ALGO_SHA224;
+ else if (ctx->flags & SHA_FLAGS_SHA256)
valmr |= SHA_MR_ALGO_SHA256;
+ else if (ctx->flags & SHA_FLAGS_SHA384)
+ valmr |= SHA_MR_ALGO_SHA384;
+ else if (ctx->flags & SHA_FLAGS_SHA512)
+ valmr |= SHA_MR_ALGO_SHA512;
/* Setting CR_FIRST only for the first iteration */
- if (!ctx->digcnt)
+ if (!(ctx->digcnt[0] || ctx->digcnt[1]))
valcr = SHA_CR_FIRST;
atmel_sha_write(dd, SHA_CR, valcr);
@@ -275,13 +346,15 @@ static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
int count, len32;
const u32 *buffer = (const u32 *)buf;
- dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
- ctx->digcnt, length, final);
+ dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+ ctx->digcnt[1], ctx->digcnt[0], length, final);
atmel_sha_write_ctrl(dd, 0);
/* should be non-zero before next lines to disable clocks later */
- ctx->digcnt += length;
+ ctx->digcnt[0] += length;
+ if (ctx->digcnt[0] < length)
+ ctx->digcnt[1]++;
if (final)
dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
@@ -302,8 +375,8 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
int len32;
- dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n",
- ctx->digcnt, length1, final);
+ dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+ ctx->digcnt[1], ctx->digcnt[0], length1, final);
len32 = DIV_ROUND_UP(length1, sizeof(u32));
atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
@@ -317,7 +390,9 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
atmel_sha_write_ctrl(dd, 1);
/* should be non-zero before next lines to disable clocks later */
- ctx->digcnt += length1;
+ ctx->digcnt[0] += length1;
+ if (ctx->digcnt[0] < length1)
+ ctx->digcnt[1]++;
if (final)
dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
@@ -330,6 +405,86 @@ static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
return -EINPROGRESS;
}
+static void atmel_sha_dma_callback(void *data)
+{
+ struct atmel_sha_dev *dd = data;
+
+ /* dma_lch_in - completed - wait DATRDY */
+ atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
+}
+
+static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+ size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+ struct dma_async_tx_descriptor *in_desc;
+ struct scatterlist sg[2];
+
+ dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
+ ctx->digcnt[1], ctx->digcnt[0], length1, final);
+
+ if (ctx->flags & (SHA_FLAGS_SHA1 | SHA_FLAGS_SHA224 |
+ SHA_FLAGS_SHA256)) {
+ dd->dma_lch_in.dma_conf.src_maxburst = 16;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 16;
+ } else {
+ dd->dma_lch_in.dma_conf.src_maxburst = 32;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 32;
+ }
+
+ dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+
+ if (length2) {
+ sg_init_table(sg, 2);
+ sg_dma_address(&sg[0]) = dma_addr1;
+ sg_dma_len(&sg[0]) = length1;
+ sg_dma_address(&sg[1]) = dma_addr2;
+ sg_dma_len(&sg[1]) = length2;
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ } else {
+ sg_init_table(sg, 1);
+ sg_dma_address(&sg[0]) = dma_addr1;
+ sg_dma_len(&sg[0]) = length1;
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+ if (!in_desc)
+ return -EINVAL;
+
+ in_desc->callback = atmel_sha_dma_callback;
+ in_desc->callback_param = dd;
+
+ atmel_sha_write_ctrl(dd, 1);
+
+ /* should be non-zero before next lines to disable clocks later */
+ ctx->digcnt[0] += length1;
+ if (ctx->digcnt[0] < length1)
+ ctx->digcnt[1]++;
+
+ if (final)
+ dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+ dd->flags |= SHA_FLAGS_DMA_ACTIVE;
+
+ /* Start DMA transfer */
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(dd->dma_lch_in.chan);
+
+ return -EINPROGRESS;
+}
+
+static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+ size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+ if (dd->caps.has_dma)
+ return atmel_sha_xmit_dma(dd, dma_addr1, length1,
+ dma_addr2, length2, final);
+ else
+ return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
+ dma_addr2, length2, final);
+}
+
static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
@@ -337,7 +492,6 @@ static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
atmel_sha_append_sg(ctx);
atmel_sha_fill_padding(ctx, 0);
-
bufcnt = ctx->bufcnt;
ctx->bufcnt = 0;
@@ -349,17 +503,17 @@ static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
size_t length, int final)
{
ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
- ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
- SHA1_BLOCK_SIZE);
+ ctx->block_size);
return -EINVAL;
}
ctx->flags &= ~SHA_FLAGS_SG;
/* next call does not fail... so no unmap in the case of error */
- return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final);
+ return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}
static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
@@ -372,8 +526,8 @@ static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
- dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
- ctx->bufcnt, ctx->digcnt, final);
+ dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
+ ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
if (final)
atmel_sha_fill_padding(ctx, 0);
@@ -400,30 +554,25 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
if (ctx->bufcnt || ctx->offset)
return atmel_sha_update_dma_slow(dd);
- dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
- ctx->digcnt, ctx->bufcnt, ctx->total);
+ dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
+ ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
sg = ctx->sg;
if (!IS_ALIGNED(sg->offset, sizeof(u32)))
return atmel_sha_update_dma_slow(dd);
- if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE))
- /* size is not SHA1_BLOCK_SIZE aligned */
+ if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
+ /* size is not ctx->block_size aligned */
return atmel_sha_update_dma_slow(dd);
length = min(ctx->total, sg->length);
if (sg_is_last(sg)) {
if (!(ctx->flags & SHA_FLAGS_FINUP)) {
- /* not last sg must be SHA1_BLOCK_SIZE aligned */
- tail = length & (SHA1_BLOCK_SIZE - 1);
+ /* not last sg must be ctx->block_size aligned */
+ tail = length & (ctx->block_size - 1);
length -= tail;
- if (length == 0) {
- /* offset where to start slow */
- ctx->offset = length;
- return atmel_sha_update_dma_slow(dd);
- }
}
}
@@ -434,7 +583,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
/* Add padding */
if (final) {
- tail = length & (SHA1_BLOCK_SIZE - 1);
+ tail = length & (ctx->block_size - 1);
length -= tail;
ctx->total += tail;
ctx->offset = length; /* offset where to start slow */
@@ -445,10 +594,10 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
atmel_sha_fill_padding(ctx, length);
ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
- ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
dev_err(dd->dev, "dma %u bytes error\n",
- ctx->buflen + SHA1_BLOCK_SIZE);
+ ctx->buflen + ctx->block_size);
return -EINVAL;
}
@@ -456,7 +605,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
ctx->flags &= ~SHA_FLAGS_SG;
count = ctx->bufcnt;
ctx->bufcnt = 0;
- return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0,
+ return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
0, final);
} else {
ctx->sg = sg;
@@ -470,7 +619,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
count = ctx->bufcnt;
ctx->bufcnt = 0;
- return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg),
+ return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
length, ctx->dma_addr, count, final);
}
}
@@ -483,7 +632,7 @@ static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
ctx->flags |= SHA_FLAGS_SG;
/* next call does not fail... so no unmap in the case of error */
- return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0,
+ return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
0, final);
}
@@ -498,12 +647,13 @@ static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
if (ctx->sg)
ctx->offset = 0;
}
- if (ctx->flags & SHA_FLAGS_PAD)
+ if (ctx->flags & SHA_FLAGS_PAD) {
dma_unmap_single(dd->dev, ctx->dma_addr,
- ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
+ }
} else {
dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
- SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ ctx->block_size, DMA_TO_DEVICE);
}
return 0;
@@ -515,8 +665,8 @@ static int atmel_sha_update_req(struct atmel_sha_dev *dd)
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
int err;
- dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
- ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0);
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
+ ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
if (ctx->flags & SHA_FLAGS_CPU)
err = atmel_sha_update_cpu(dd);
@@ -524,8 +674,8 @@ static int atmel_sha_update_req(struct atmel_sha_dev *dd)
err = atmel_sha_update_dma_start(dd);
/* wait for dma completion before can take more data */
- dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n",
- err, ctx->digcnt);
+ dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0%llx\n",
+ err, ctx->digcnt[1], ctx->digcnt[0]);
return err;
}
@@ -562,12 +712,21 @@ static void atmel_sha_copy_hash(struct ahash_request *req)
u32 *hash = (u32 *)ctx->digest;
int i;
- if (likely(ctx->flags & SHA_FLAGS_SHA1))
+ if (ctx->flags & SHA_FLAGS_SHA1)
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else
+ else if (ctx->flags & SHA_FLAGS_SHA224)
+ for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ else if (ctx->flags & SHA_FLAGS_SHA256)
for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ else if (ctx->flags & SHA_FLAGS_SHA384)
+ for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ else
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
}
static void atmel_sha_copy_ready_hash(struct ahash_request *req)
@@ -577,10 +736,16 @@ static void atmel_sha_copy_ready_hash(struct ahash_request *req)
if (!req->result)
return;
- if (likely(ctx->flags & SHA_FLAGS_SHA1))
+ if (ctx->flags & SHA_FLAGS_SHA1)
memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
- else
+ else if (ctx->flags & SHA_FLAGS_SHA224)
+ memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
+ else if (ctx->flags & SHA_FLAGS_SHA256)
memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
+ else if (ctx->flags & SHA_FLAGS_SHA384)
+ memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
+ else
+ memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
}
static int atmel_sha_finish(struct ahash_request *req)
@@ -589,11 +754,11 @@ static int atmel_sha_finish(struct ahash_request *req)
struct atmel_sha_dev *dd = ctx->dd;
int err = 0;
- if (ctx->digcnt)
+ if (ctx->digcnt[0] || ctx->digcnt[1])
atmel_sha_copy_ready_hash(req);
- dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt,
- ctx->bufcnt);
+ dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
+ ctx->digcnt[0], ctx->bufcnt);
return err;
}
@@ -628,9 +793,8 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
clk_prepare_enable(dd->iclk);
- if (SHA_FLAGS_INIT & dd->flags) {
+ if (!(SHA_FLAGS_INIT & dd->flags)) {
atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
- atmel_sha_dualbuff_test(dd);
dd->flags |= SHA_FLAGS_INIT;
dd->err = 0;
}
@@ -638,6 +802,23 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
return 0;
}
+static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
+{
+ return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
+}
+
+static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+{
+ atmel_sha_hw_init(dd);
+
+ dd->hw_version = atmel_sha_get_version(dd);
+
+ dev_info(dd->dev,
+ "version: 0x%x\n", dd->hw_version);
+
+ clk_disable_unprepare(dd->iclk);
+}
+
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
struct ahash_request *req)
{
@@ -682,10 +863,9 @@ static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
if (ctx->op == SHA_OP_UPDATE) {
err = atmel_sha_update_req(dd);
- if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) {
+ if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
/* no final() after finup() */
err = atmel_sha_final_req(dd);
- }
} else if (ctx->op == SHA_OP_FINAL) {
err = atmel_sha_final_req(dd);
}
@@ -808,7 +988,7 @@ static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct atmel_sha_reqctx) +
- SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+ SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
return 0;
}
@@ -826,7 +1006,7 @@ static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
tctx->fallback = NULL;
}
-static struct ahash_alg sha_algs[] = {
+static struct ahash_alg sha_1_256_algs[] = {
{
.init = atmel_sha_init,
.update = atmel_sha_update,
@@ -875,6 +1055,79 @@ static struct ahash_alg sha_algs[] = {
},
};
+static struct ahash_alg sha_224_alg = {
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "atmel-sha224",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+};
+
+static struct ahash_alg sha_384_512_algs[] = {
+{
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "atmel-sha384",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0x3,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+},
+{
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "atmel-sha512",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0x3,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+},
+};
+
static void atmel_sha_done_task(unsigned long data)
{
struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
@@ -941,32 +1194,142 @@ static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
int i;
- for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
- crypto_unregister_ahash(&sha_algs[i]);
+ for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
+ crypto_unregister_ahash(&sha_1_256_algs[i]);
+
+ if (dd->caps.has_sha224)
+ crypto_unregister_ahash(&sha_224_alg);
+
+ if (dd->caps.has_sha_384_512) {
+ for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
+ crypto_unregister_ahash(&sha_384_512_algs[i]);
+ }
}
static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
int err, i, j;
- for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
- err = crypto_register_ahash(&sha_algs[i]);
+ for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
+ err = crypto_register_ahash(&sha_1_256_algs[i]);
if (err)
- goto err_sha_algs;
+ goto err_sha_1_256_algs;
+ }
+
+ if (dd->caps.has_sha224) {
+ err = crypto_register_ahash(&sha_224_alg);
+ if (err)
+ goto err_sha_224_algs;
+ }
+
+ if (dd->caps.has_sha_384_512) {
+ for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
+ err = crypto_register_ahash(&sha_384_512_algs[i]);
+ if (err)
+ goto err_sha_384_512_algs;
+ }
}
return 0;
-err_sha_algs:
+err_sha_384_512_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_ahash(&sha_384_512_algs[j]);
+ crypto_unregister_ahash(&sha_224_alg);
+err_sha_224_algs:
+ i = ARRAY_SIZE(sha_1_256_algs);
+err_sha_1_256_algs:
for (j = 0; j < i; j++)
- crypto_unregister_ahash(&sha_algs[j]);
+ crypto_unregister_ahash(&sha_1_256_algs[j]);
return err;
}
+static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl && sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
+ struct crypto_platform_data *pdata)
+{
+ int err = -ENOMEM;
+ dma_cap_mask_t mask_in;
+
+ if (pdata && pdata->dma_slave->rxdata.dma_dev) {
+ /* Try to grab DMA channel */
+ dma_cap_zero(mask_in);
+ dma_cap_set(DMA_SLAVE, mask_in);
+
+ dd->dma_lch_in.chan = dma_request_channel(mask_in,
+ atmel_sha_filter, &pdata->dma_slave->rxdata);
+
+ if (!dd->dma_lch_in.chan)
+ return err;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ SHA_REG_DIN(0);
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
+{
+ dma_release_channel(dd->dma_lch_in.chan);
+}
+
+static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
+{
+
+ dd->caps.has_dma = 0;
+ dd->caps.has_dualbuff = 0;
+ dd->caps.has_sha224 = 0;
+ dd->caps.has_sha_384_512 = 0;
+
+ /* keep only major version number */
+ switch (dd->hw_version & 0xff0) {
+ case 0x410:
+ dd->caps.has_dma = 1;
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_sha224 = 1;
+ dd->caps.has_sha_384_512 = 1;
+ break;
+ case 0x400:
+ dd->caps.has_dma = 1;
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_sha224 = 1;
+ break;
+ case 0x320:
+ break;
+ default:
+ dev_warn(dd->dev,
+ "Unmanaged sha version, set minimum capabilities\n");
+ break;
+ }
+}
+
static int atmel_sha_probe(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd;
+ struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *sha_res;
unsigned long sha_phys_size;
@@ -1018,7 +1381,7 @@ static int atmel_sha_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- sha_dd->iclk = clk_get(&pdev->dev, NULL);
+ sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
if (IS_ERR(sha_dd->iclk)) {
dev_err(dev, "clock intialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
@@ -1032,6 +1395,22 @@ static int atmel_sha_probe(struct platform_device *pdev)
goto sha_io_err;
}
+ atmel_sha_hw_version_init(sha_dd);
+
+ atmel_sha_get_cap(sha_dd);
+
+ if (sha_dd->caps.has_dma) {
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ err = -ENXIO;
+ goto err_pdata;
+ }
+ err = atmel_sha_dma_init(sha_dd, pdata);
+ if (err)
+ goto err_sha_dma;
+ }
+
spin_lock(&atmel_sha.lock);
list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
spin_unlock(&atmel_sha.lock);
@@ -1048,6 +1427,10 @@ err_algs:
spin_lock(&atmel_sha.lock);
list_del(&sha_dd->list);
spin_unlock(&atmel_sha.lock);
+ if (sha_dd->caps.has_dma)
+ atmel_sha_dma_cleanup(sha_dd);
+err_sha_dma:
+err_pdata:
iounmap(sha_dd->io_base);
sha_io_err:
clk_put(sha_dd->iclk);
@@ -1078,6 +1461,9 @@ static int atmel_sha_remove(struct platform_device *pdev)
tasklet_kill(&sha_dd->done_task);
+ if (sha_dd->caps.has_dma)
+ atmel_sha_dma_cleanup(sha_dd);
+
iounmap(sha_dd->io_base);
clk_put(sha_dd->iclk);
@@ -1102,6 +1488,6 @@ static struct platform_driver atmel_sha_driver = {
module_platform_driver(atmel_sha_driver);
-MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support.");
+MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h
index 5ac2a900d80..f86734d0fda 100644
--- a/drivers/crypto/atmel-tdes-regs.h
+++ b/drivers/crypto/atmel-tdes-regs.h
@@ -69,6 +69,8 @@
#define TDES_XTEARNDR_XTEA_RNDS_MASK (0x3F << 0)
#define TDES_XTEARNDR_XTEA_RNDS_OFFSET 0
+#define TDES_HW_VERSION 0xFC
+
#define TDES_RPR 0x100
#define TDES_RCR 0x104
#define TDES_TPR 0x108
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 7c73fbb1753..4a99564a08e 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -38,29 +38,35 @@
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <linux/platform_data/crypto-atmel.h>
#include "atmel-tdes-regs.h"
/* TDES flags */
-#define TDES_FLAGS_MODE_MASK 0x007f
+#define TDES_FLAGS_MODE_MASK 0x00ff
#define TDES_FLAGS_ENCRYPT BIT(0)
#define TDES_FLAGS_CBC BIT(1)
#define TDES_FLAGS_CFB BIT(2)
#define TDES_FLAGS_CFB8 BIT(3)
#define TDES_FLAGS_CFB16 BIT(4)
#define TDES_FLAGS_CFB32 BIT(5)
-#define TDES_FLAGS_OFB BIT(6)
+#define TDES_FLAGS_CFB64 BIT(6)
+#define TDES_FLAGS_OFB BIT(7)
#define TDES_FLAGS_INIT BIT(16)
#define TDES_FLAGS_FAST BIT(17)
#define TDES_FLAGS_BUSY BIT(18)
+#define TDES_FLAGS_DMA BIT(19)
-#define ATMEL_TDES_QUEUE_LENGTH 1
+#define ATMEL_TDES_QUEUE_LENGTH 50
#define CFB8_BLOCK_SIZE 1
#define CFB16_BLOCK_SIZE 2
#define CFB32_BLOCK_SIZE 4
-#define CFB64_BLOCK_SIZE 8
+struct atmel_tdes_caps {
+ bool has_dma;
+ u32 has_cfb_3keys;
+};
struct atmel_tdes_dev;
@@ -70,12 +76,19 @@ struct atmel_tdes_ctx {
int keylen;
u32 key[3*DES_KEY_SIZE / sizeof(u32)];
unsigned long flags;
+
+ u16 block_size;
};
struct atmel_tdes_reqctx {
unsigned long mode;
};
+struct atmel_tdes_dma {
+ struct dma_chan *chan;
+ struct dma_slave_config dma_conf;
+};
+
struct atmel_tdes_dev {
struct list_head list;
unsigned long phys_base;
@@ -99,8 +112,10 @@ struct atmel_tdes_dev {
size_t total;
struct scatterlist *in_sg;
+ unsigned int nb_in_sg;
size_t in_offset;
struct scatterlist *out_sg;
+ unsigned int nb_out_sg;
size_t out_offset;
size_t buflen;
@@ -109,10 +124,16 @@ struct atmel_tdes_dev {
void *buf_in;
int dma_in;
dma_addr_t dma_addr_in;
+ struct atmel_tdes_dma dma_lch_in;
void *buf_out;
int dma_out;
dma_addr_t dma_addr_out;
+ struct atmel_tdes_dma dma_lch_out;
+
+ struct atmel_tdes_caps caps;
+
+ u32 hw_version;
};
struct atmel_tdes_drv {
@@ -207,6 +228,31 @@ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
return 0;
}
+static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
+{
+ return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
+}
+
+static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
+{
+ atmel_tdes_hw_init(dd);
+
+ dd->hw_version = atmel_tdes_get_version(dd);
+
+ dev_info(dd->dev,
+ "version: 0x%x\n", dd->hw_version);
+
+ clk_disable_unprepare(dd->iclk);
+}
+
+static void atmel_tdes_dma_callback(void *data)
+{
+ struct atmel_tdes_dev *dd = data;
+
+ /* dma_lch_out - completed */
+ tasklet_schedule(&dd->done_task);
+}
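+
+/*
+ * Only the output (device-to-memory) descriptor carries this callback: once
+ * the last output word has been drained from the IP, the matching input
+ * transfer is necessarily finished too, so a single completion is enough to
+ * schedule the done tasklet.
+ */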
+
static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
{
int err;
@@ -217,7 +263,9 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
if (err)
return err;
- atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+ if (!dd->caps.has_dma)
+ atmel_tdes_write(dd, TDES_PTCR,
+ TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
/* MR register must be set before IV registers */
if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
@@ -241,6 +289,8 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
valmr |= TDES_MR_CFBS_16b;
else if (dd->flags & TDES_FLAGS_CFB32)
valmr |= TDES_MR_CFBS_32b;
+ else if (dd->flags & TDES_FLAGS_CFB64)
+ valmr |= TDES_MR_CFBS_64b;
} else if (dd->flags & TDES_FLAGS_OFB) {
valmr |= TDES_MR_OPMOD_OFB;
}
@@ -262,7 +312,7 @@ static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
return 0;
}
-static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
+static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
{
int err = 0;
size_t count;
@@ -288,7 +338,7 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
return err;
}
-static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
+static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
{
int err = -ENOMEM;
@@ -333,7 +383,7 @@ err_alloc:
return err;
}
-static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
+static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
{
dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
DMA_FROM_DEVICE);
@@ -343,7 +393,7 @@ static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
free_page((unsigned long)dd->buf_in);
}
-static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
dma_addr_t dma_addr_out, int length)
{
struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -379,7 +429,76 @@ static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
return 0;
}
-static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
+static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
+{
+ struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_tdes_dev *dd = ctx->dd;
+ struct scatterlist sg[2];
+ struct dma_async_tx_descriptor *in_desc, *out_desc;
+
+ dd->dma_size = length;
+
+ if (!(dd->flags & TDES_FLAGS_FAST)) {
+ dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+ DMA_TO_DEVICE);
+ }
+
+ if (dd->flags & TDES_FLAGS_CFB8) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else if (dd->flags & TDES_FLAGS_CFB16) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
+
+ dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+ dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+
+ dd->flags |= TDES_FLAGS_DMA;
+
+ sg_init_table(&sg[0], 1);
+ sg_dma_address(&sg[0]) = dma_addr_in;
+ sg_dma_len(&sg[0]) = length;
+
+ sg_init_table(&sg[1], 1);
+ sg_dma_address(&sg[1]) = dma_addr_out;
+ sg_dma_len(&sg[1]) = length;
+
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
+ 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!in_desc)
+ return -EINVAL;
+
+ out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!out_desc)
+ return -EINVAL;
+
+ out_desc->callback = atmel_tdes_dma_callback;
+ out_desc->callback_param = dd;
+
+ dmaengine_submit(out_desc);
+ dma_async_issue_pending(dd->dma_lch_out.chan);
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(dd->dma_lch_in.chan);
+
+ return 0;
+}
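+
+/*
+ * The slave bus widths above are narrowed to match the CFB segment size
+ * (1 byte for CFB8, 2 for CFB16, 4 otherwise) so every DMA beat transfers
+ * exactly one unit the IP expects. Both descriptors are submitted before
+ * either channel is started, and the output channel is kicked first so the
+ * read side is already armed when data begins flowing into TDES_IDATA1R.
+ */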
+
+static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
crypto_ablkcipher_reqtfm(dd->req));
@@ -387,23 +506,23 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
size_t count;
dma_addr_t addr_in, addr_out;
- if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
+ if ((!dd->in_offset) && (!dd->out_offset)) {
/* check for alignment */
- in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
- out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
-
+ in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
+ out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
+ IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
fast = in && out;
+
+ if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
+ fast = 0;
}
+
if (fast) {
count = min(dd->total, sg_dma_len(dd->in_sg));
count = min(count, sg_dma_len(dd->out_sg));
- if (count != dd->total) {
- pr_err("request length != buffer length\n");
- return -EINVAL;
- }
-
err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
if (!err) {
dev_err(dd->dev, "dma_map_sg() error\n");
@@ -433,13 +552,16 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
addr_out = dd->dma_addr_out;
dd->flags &= ~TDES_FLAGS_FAST;
-
}
dd->total -= count;
- err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
- if (err) {
+ if (dd->caps.has_dma)
+ err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+ else
+ err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
+
+ if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
}
@@ -447,7 +569,6 @@ static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
return err;
}
-
static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
{
struct ablkcipher_request *req = dd->req;
@@ -506,7 +627,7 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
err = atmel_tdes_write_ctrl(dd);
if (!err)
- err = atmel_tdes_crypt_dma_start(dd);
+ err = atmel_tdes_crypt_start(dd);
if (err) {
/* des_task will not finish it, so do it here */
atmel_tdes_finish_req(dd, err);
@@ -516,41 +637,145 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
return ret;
}
+static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
+{
+ int err = -EINVAL;
+ size_t count;
+
+ if (dd->flags & TDES_FLAGS_DMA) {
+ err = 0;
+ if (dd->flags & TDES_FLAGS_FAST) {
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ } else {
+ dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
+
+ /* copy data */
+ count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
+ dd->buf_out, dd->buflen, dd->dma_size, 1);
+ if (count != dd->dma_size) {
+ err = -EINVAL;
+ pr_err("not all data converted: %u\n", count);
+ }
+ }
+ }
+ return err;
+}
static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
- struct atmel_tdes_dev *dd;
if (mode & TDES_FLAGS_CFB8) {
if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB8 blocks\n");
return -EINVAL;
}
+ ctx->block_size = CFB8_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB16) {
if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB16 blocks\n");
return -EINVAL;
}
+ ctx->block_size = CFB16_BLOCK_SIZE;
} else if (mode & TDES_FLAGS_CFB32) {
if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
pr_err("request size is not exact amount of CFB32 blocks\n");
return -EINVAL;
}
- } else if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of DES blocks\n");
- return -EINVAL;
+ ctx->block_size = CFB32_BLOCK_SIZE;
+ } else {
+ if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of DES blocks\n");
+ return -EINVAL;
+ }
+ ctx->block_size = DES_BLOCK_SIZE;
}
- dd = atmel_tdes_find_dev(ctx);
- if (!dd)
+ rctx->mode = mode;
+
+ return atmel_tdes_handle_queue(ctx->dd, req);
+}
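+
+/*
+ * The block size recorded in ctx->block_size above is what the alignment
+ * check in atmel_tdes_crypt_start() uses: the zero-copy "fast" path is only
+ * taken when both scatterlist entries are word aligned and a whole number
+ * of blocks long, otherwise data is bounced through the driver's buffers.
+ */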
+
+static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl && sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
+ struct crypto_platform_data *pdata)
+{
+ int err = -ENOMEM;
+ dma_cap_mask_t mask_in, mask_out;
+
+ if (pdata && pdata->dma_slave->txdata.dma_dev &&
+ pdata->dma_slave->rxdata.dma_dev) {
+
+ /* Try to grab 2 DMA channels */
+ dma_cap_zero(mask_in);
+ dma_cap_set(DMA_SLAVE, mask_in);
+
+ dd->dma_lch_in.chan = dma_request_channel(mask_in,
+ atmel_tdes_filter, &pdata->dma_slave->rxdata);
+
+ if (!dd->dma_lch_in.chan)
+ goto err_dma_in;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ TDES_IDATA1R;
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ dma_cap_zero(mask_out);
+ dma_cap_set(DMA_SLAVE, mask_out);
+ dd->dma_lch_out.chan = dma_request_channel(mask_out,
+ atmel_tdes_filter, &pdata->dma_slave->txdata);
+
+ if (!dd->dma_lch_out.chan)
+ goto err_dma_out;
+
+ dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+ dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+ TDES_ODATA1R;
+ dd->dma_lch_out.dma_conf.src_maxburst = 1;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.device_fc = false;
+
+ return 0;
+ } else {
return -ENODEV;
+ }
- rctx->mode = mode;
+err_dma_out:
+ dma_release_channel(dd->dma_lch_in.chan);
+err_dma_in:
+ return err;
+}
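+
+/*
+ * Sketch of the expected wiring (an illustrative assumption, not part of
+ * this patch): a board file provides a struct crypto_platform_data whose
+ * rx/tx at_dma_slave entries name the DMA controller device, roughly:
+ *
+ *	static struct crypto_dma_data tdes_dma = {
+ *		.rxdata = { .dma_dev = &dmac_device.dev },
+ *		.txdata = { .dma_dev = &dmac_device.dev },
+ *	};
+ *	static struct crypto_platform_data tdes_pdata = {
+ *		.dma_slave = &tdes_dma,
+ *	};
+ *
+ * where "dmac_device" stands in for whichever platform device exposes the
+ * DMA controller; atmel_tdes_filter() then only accepts channels owned by
+ * that controller.
+ */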
- return atmel_tdes_handle_queue(dd, req);
+static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
+{
+ dma_release_channel(dd->dma_lch_in.chan);
+ dma_release_channel(dd->dma_lch_out.chan);
}
static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -590,7 +815,8 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
/*
* HW bug in cfb 3-keys mode.
*/
- if (strstr(alg_name, "cfb") && (keylen != 2*DES_KEY_SIZE)) {
+ if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb")
+ && (keylen != 2*DES_KEY_SIZE)) {
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
} else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
@@ -678,8 +904,15 @@ static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
{
+ struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_tdes_dev *dd;
+
tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
+ dd = atmel_tdes_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
+
return 0;
}
@@ -695,7 +928,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -715,7 +948,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -736,7 +969,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -778,7 +1011,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB16_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x1,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -799,7 +1032,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x3,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -820,7 +1053,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -841,7 +1074,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -861,7 +1094,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -882,7 +1115,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -924,7 +1157,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB16_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x1,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -945,7 +1178,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = CFB32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x3,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -966,7 +1199,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = 0x7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
@@ -994,14 +1227,24 @@ static void atmel_tdes_done_task(unsigned long data)
struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
int err;
- err = atmel_tdes_crypt_dma_stop(dd);
+ if (!(dd->flags & TDES_FLAGS_DMA))
+ err = atmel_tdes_crypt_pdc_stop(dd);
+ else
+ err = atmel_tdes_crypt_dma_stop(dd);
err = dd->err ? : err;
if (dd->total && !err) {
- err = atmel_tdes_crypt_dma_start(dd);
+ if (dd->flags & TDES_FLAGS_FAST) {
+ dd->in_sg = sg_next(dd->in_sg);
+ dd->out_sg = sg_next(dd->out_sg);
+ if (!dd->in_sg || !dd->out_sg)
+ err = -EINVAL;
+ }
if (!err)
- return;
+ err = atmel_tdes_crypt_start(dd);
+ if (!err)
+ return; /* DMA started. Not finishing. */
}
atmel_tdes_finish_req(dd, err);
@@ -1053,9 +1296,31 @@ err_tdes_algs:
return err;
}
+static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
+{
+
+ dd->caps.has_dma = 0;
+ dd->caps.has_cfb_3keys = 0;
+
+ /* keep only major version number */
+ switch (dd->hw_version & 0xf00) {
+ case 0x700:
+ dd->caps.has_dma = 1;
+ dd->caps.has_cfb_3keys = 1;
+ break;
+ case 0x600:
+ break;
+ default:
+ dev_warn(dd->dev,
+ "Unmanaged tdes version, set minimum capabilities\n");
+ break;
+ }
+}
+
static int atmel_tdes_probe(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd;
+ struct crypto_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *tdes_res;
unsigned long tdes_phys_size;
@@ -1109,7 +1374,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
}
/* Initializing the clock */
- tdes_dd->iclk = clk_get(&pdev->dev, NULL);
+ tdes_dd->iclk = clk_get(&pdev->dev, "tdes_clk");
if (IS_ERR(tdes_dd->iclk)) {
dev_err(dev, "clock intialization failed.\n");
err = PTR_ERR(tdes_dd->iclk);
@@ -1123,9 +1388,25 @@ static int atmel_tdes_probe(struct platform_device *pdev)
goto tdes_io_err;
}
- err = atmel_tdes_dma_init(tdes_dd);
+ atmel_tdes_hw_version_init(tdes_dd);
+
+ atmel_tdes_get_cap(tdes_dd);
+
+ err = atmel_tdes_buff_init(tdes_dd);
if (err)
- goto err_tdes_dma;
+ goto err_tdes_buff;
+
+ if (tdes_dd->caps.has_dma) {
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data not available\n");
+ err = -ENXIO;
+ goto err_pdata;
+ }
+ err = atmel_tdes_dma_init(tdes_dd, pdata);
+ if (err)
+ goto err_tdes_dma;
+ }
spin_lock(&atmel_tdes.lock);
list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
@@ -1143,8 +1424,12 @@ err_algs:
spin_lock(&atmel_tdes.lock);
list_del(&tdes_dd->list);
spin_unlock(&atmel_tdes.lock);
- atmel_tdes_dma_cleanup(tdes_dd);
+ if (tdes_dd->caps.has_dma)
+ atmel_tdes_dma_cleanup(tdes_dd);
err_tdes_dma:
+err_pdata:
+ atmel_tdes_buff_cleanup(tdes_dd);
+err_tdes_buff:
iounmap(tdes_dd->io_base);
tdes_io_err:
clk_put(tdes_dd->iclk);
@@ -1178,7 +1463,10 @@ static int atmel_tdes_remove(struct platform_device *pdev)
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
- atmel_tdes_dma_cleanup(tdes_dd);
+ if (tdes_dd->caps.has_dma)
+ atmel_tdes_dma_cleanup(tdes_dd);
+
+ atmel_tdes_buff_cleanup(tdes_dd);
iounmap(tdes_dd->io_base);
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 827913d7d33..d797f31f5d8 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -151,7 +151,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req)
struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
struct bfin_crypto_crc *crc;
- dev_dbg(crc->dev, "crc_init\n");
+ dev_dbg(ctx->crc->dev, "crc_init\n");
spin_lock_bh(&crc_list.lock);
list_for_each_entry(crc, &crc_list.dev_list, list) {
crc_ctx->crc = crc;
@@ -160,7 +160,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req)
spin_unlock_bh(&crc_list.lock);
if (sg_count(req->src) > CRC_MAX_DMA_DESC) {
- dev_dbg(crc->dev, "init: requested sg list is too big > %d\n",
+ dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
CRC_MAX_DMA_DESC);
return -EINVAL;
}
@@ -175,7 +175,7 @@ static int bfin_crypto_crc_init(struct ahash_request *req)
/* init crc results */
put_unaligned_le32(crc_ctx->key, req->result);
- dev_dbg(crc->dev, "init: digest size: %d\n",
+ dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
crypto_ahash_digestsize(tfm));
return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 65c7668614a..b44091c47f7 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -78,7 +78,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
depends on CRYPTO_DEV_FSL_CAAM
default y
- select CRYPTO_AHASH
+ select CRYPTO_HASH
help
Selecting this will offload ahash for users of the
scatterlist crypto API to the SEC4 via job ring.
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index cf268b14ae9..765fdf5ce57 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1693,6 +1693,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha224),cbc(aes))",
.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1732,6 +1733,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha384),cbc(aes))",
.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1810,6 +1812,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha224),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1849,6 +1852,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha384),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1926,6 +1930,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha224),cbc(des))",
.driver_name = "authenc-hmac-sha224-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
@@ -1965,6 +1970,7 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha384),cbc(des))",
.driver_name = "authenc-hmac-sha384-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 32aba7a6150..5996521a1ca 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -411,7 +411,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
-static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 keylen)
{
return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
@@ -420,7 +420,7 @@ static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
}
/* Digest hash size if it is too large */
-static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
{
struct device *jrdev = ctx->jrdev;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 8acf00490fd..6e94bcd9467 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -304,6 +304,9 @@ static int caam_probe(struct platform_device *pdev)
caam_remove(pdev);
return ret;
}
+
+ /* Enable RDB bit so that RNG works faster */
+ setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
}
/* NOTE: RTIC detection ought to go here, around Si time */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 30b8f74833d..9f25f529602 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -36,7 +36,7 @@ static void report_jump_idx(u32 status, char *outstr)
static void report_ccb_status(u32 status, char *outstr)
{
- char *cha_id_list[] = {
+ static const char * const cha_id_list[] = {
"",
"AES",
"DES",
@@ -51,7 +51,7 @@ static void report_ccb_status(u32 status, char *outstr)
"ZUCE",
"ZUCA",
};
- char *err_id_list[] = {
+ static const char * const err_id_list[] = {
"No error.",
"Mode error.",
"Data size error.",
@@ -69,7 +69,7 @@ static void report_ccb_status(u32 status, char *outstr)
"Invalid CHA combination was selected",
"Invalid CHA selected.",
};
- char *rng_err_id_list[] = {
+ static const char * const rng_err_id_list[] = {
"",
"",
"",
@@ -117,7 +117,7 @@ static void report_jump_status(u32 status, char *outstr)
static void report_deco_status(u32 status, char *outstr)
{
- const struct {
+ static const struct {
u8 value;
char *error_text;
} desc_error_list[] = {
@@ -245,7 +245,7 @@ static void report_cond_code_status(u32 status, char *outstr)
char *caam_jr_strstatus(char *outstr, u32 status)
{
- struct stat_src {
+ static const struct stat_src {
void (*report_ssed)(u32 status, char *outstr);
char *error;
} status_src[] = {
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5cd4c1b268a..e4a16b74137 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_jrentry_info {
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
struct device *parentdev; /* points back to controller dev */
+ struct platform_device *jr_pdev;/* points to platform device for JR */
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 93d14070141..b4aa773ecbc 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -407,6 +407,7 @@ int caam_jr_shutdown(struct device *dev)
dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
jrp->outring, outbusaddr);
kfree(jrp->entinfo);
+ of_device_unregister(jrp->jr_pdev);
return ret;
}
@@ -454,6 +455,8 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
kfree(jrpriv);
return -EINVAL;
}
+
+ jrpriv->jr_pdev = jr_pdev;
jrdev = &jr_pdev->dev;
dev_set_drvdata(jrdev, jrpriv);
ctrlpriv->jrdev[ring] = jrdev;
@@ -472,6 +475,7 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
if (error) {
+ of_device_unregister(jr_pdev);
kfree(jrpriv);
return error;
}
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index f6dba10246c..87138d2adb5 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -44,7 +44,7 @@ Split key generation-----------------------------------------------
[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
@0xffe04000
*/
-u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
int split_key_pad_len, const u8 *key_in, u32 keylen,
u32 alg_op)
{
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index d95d290c6e8..c5588f6d810 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -12,6 +12,6 @@ struct split_key_result {
void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
-u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
int split_key_pad_len, const u8 *key_in, u32 keylen,
u32 alg_op);
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 3223fc6d647..cd6fedad993 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -252,7 +252,8 @@ struct caam_ctrl {
/* Read/Writable */
u32 rsvd1;
u32 mcr; /* MCFG Master Config Register */
- u32 rsvd2[2];
+ u32 rsvd2;
+ u32 scfgr; /* SCFGR, Security Config Register */
/* Bus Access Configuration Section 010-11f */
/* Read/Writable */
@@ -299,6 +300,7 @@ struct caam_ctrl {
#define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */
#define MCFGR_DMA_RESET 0x10000000
#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
+#define SCFGR_RDBENABLE 0x00000400
/* AXI read cache control */
#define MCFGR_ARCACHE_SHIFT 12
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 6aa425fe0ed..ee15b0f7849 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -636,7 +636,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
pr_debug("err: %d\n", err);
- pm_runtime_put_sync(dd->dev);
+ pm_runtime_put(dd->dev);
dd->flags &= ~FLAGS_BUSY;
req->base.complete(&req->base, err);
@@ -1248,18 +1248,7 @@ static struct platform_driver omap_aes_driver = {
},
};
-static int __init omap_aes_mod_init(void)
-{
- return platform_driver_register(&omap_aes_driver);
-}
-
-static void __exit omap_aes_mod_exit(void)
-{
- platform_driver_unregister(&omap_aes_driver);
-}
-
-module_init(omap_aes_mod_init);
-module_exit(omap_aes_mod_exit);
+module_platform_driver(omap_aes_driver);
MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 3d1611f5aec..a1e1b4756ee 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -923,7 +923,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
- pm_runtime_put_sync(dd->dev);
+ pm_runtime_put(dd->dev);
if (req->base.complete)
req->base.complete(&req->base, err);
@@ -1813,18 +1813,7 @@ static struct platform_driver omap_sham_driver = {
},
};
-static int __init omap_sham_mod_init(void)
-{
- return platform_driver_register(&omap_sham_driver);
-}
-
-static void __exit omap_sham_mod_exit(void)
-{
- platform_driver_unregister(&omap_sham_driver);
-}
-
-module_init(omap_sham_mod_init);
-module_exit(omap_sham_mod_exit);
+module_platform_driver(omap_sham_driver);
MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 2096d4685a9..ac30724d923 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1688,8 +1688,6 @@ static const struct of_device_id spacc_of_id_table[] = {
{ .compatible = "picochip,spacc-l2" },
{}
};
-#else /* CONFIG_OF */
-#define spacc_of_id_table NULL
#endif /* CONFIG_OF */
static bool spacc_is_compatible(struct platform_device *pdev,
@@ -1874,7 +1872,7 @@ static struct platform_driver spacc_driver = {
#ifdef CONFIG_PM
.pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
- .of_match_table = spacc_of_id_table,
+ .of_match_table = of_match_ptr(spacc_of_id_table),
},
.id_table = spacc_id_table,
};
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
new file mode 100644
index 00000000000..a97bb6c1596
--- /dev/null
+++ b/drivers/crypto/sahara.c
@@ -0,0 +1,1070 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for SAHARA cryptographic accelerator.
+ *
+ * Copyright (c) 2013 Vista Silicon S.L.
+ * Author: Javier Martin <javier.martin@vista-silicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on omap-aes.c and tegra-aes.c
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define SAHARA_NAME "sahara"
+#define SAHARA_VERSION_3 3
+#define SAHARA_TIMEOUT_MS 1000
+#define SAHARA_MAX_HW_DESC 2
+#define SAHARA_MAX_HW_LINK 20
+
+#define FLAGS_MODE_MASK 0x000f
+#define FLAGS_ENCRYPT BIT(0)
+#define FLAGS_CBC BIT(1)
+#define FLAGS_NEW_KEY BIT(3)
+#define FLAGS_BUSY 4
+
+#define SAHARA_HDR_BASE 0x00800000
+#define SAHARA_HDR_SKHA_ALG_AES 0
+#define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
+#define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
+#define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
+#define SAHARA_HDR_FORM_DATA (5 << 16)
+#define SAHARA_HDR_FORM_KEY (8 << 16)
+#define SAHARA_HDR_LLO (1 << 24)
+#define SAHARA_HDR_CHA_SKHA (1 << 28)
+#define SAHARA_HDR_CHA_MDHA (2 << 28)
+#define SAHARA_HDR_PARITY_BIT (1 << 31)
+
+/* SAHARA can only process one request at a time */
+#define SAHARA_QUEUE_LENGTH 1
+
+#define SAHARA_REG_VERSION 0x00
+#define SAHARA_REG_DAR 0x04
+#define SAHARA_REG_CONTROL 0x08
+#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
+#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
+#define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
+#define SAHARA_CONTROL_ENABLE_INT (1 << 4)
+#define SAHARA_REG_CMD 0x0C
+#define SAHARA_CMD_RESET (1 << 0)
+#define SAHARA_CMD_CLEAR_INT (1 << 8)
+#define SAHARA_CMD_CLEAR_ERR (1 << 9)
+#define SAHARA_CMD_SINGLE_STEP (1 << 10)
+#define SAHARA_CMD_MODE_BATCH (1 << 16)
+#define SAHARA_CMD_MODE_DEBUG (1 << 18)
+#define SAHARA_REG_STATUS 0x10
+#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
+#define SAHARA_STATE_IDLE 0
+#define SAHARA_STATE_BUSY 1
+#define SAHARA_STATE_ERR 2
+#define SAHARA_STATE_FAULT 3
+#define SAHARA_STATE_COMPLETE 4
+#define SAHARA_STATE_COMP_FLAG (1 << 2)
+#define SAHARA_STATUS_DAR_FULL (1 << 3)
+#define SAHARA_STATUS_ERROR (1 << 4)
+#define SAHARA_STATUS_SECURE (1 << 5)
+#define SAHARA_STATUS_FAIL (1 << 6)
+#define SAHARA_STATUS_INIT (1 << 7)
+#define SAHARA_STATUS_RNG_RESEED (1 << 8)
+#define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
+#define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
+#define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
+#define SAHARA_STATUS_MODE_BATCH (1 << 16)
+#define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
+#define SAHARA_STATUS_MODE_DEBUG (1 << 18)
+#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
+#define SAHARA_REG_ERRSTATUS 0x14
+#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
+#define SAHARA_ERRSOURCE_CHA 14
+#define SAHARA_ERRSOURCE_DMA 15
+#define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
+#define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3)
+#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
+#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
+#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
+#define SAHARA_REG_FADDR 0x18
+#define SAHARA_REG_CDAR 0x1C
+#define SAHARA_REG_IDAR 0x20
+
+struct sahara_hw_desc {
+ u32 hdr;
+ u32 len1;
+ dma_addr_t p1;
+ u32 len2;
+ dma_addr_t p2;
+ dma_addr_t next;
+};
+
+struct sahara_hw_link {
+ u32 len;
+ dma_addr_t p;
+ dma_addr_t next;
+};
+
+struct sahara_ctx {
+ struct sahara_dev *dev;
+ unsigned long flags;
+ int keylen;
+ u8 key[AES_KEYSIZE_128];
+ struct crypto_ablkcipher *fallback;
+};
+
+struct sahara_aes_reqctx {
+ unsigned long mode;
+};
+
+struct sahara_dev {
+ struct device *device;
+ void __iomem *regs_base;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+
+ struct sahara_ctx *ctx;
+ spinlock_t lock;
+ struct crypto_queue queue;
+ unsigned long flags;
+
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
+
+ struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
+ dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
+
+ u8 *key_base;
+ dma_addr_t key_phys_base;
+
+ u8 *iv_base;
+ dma_addr_t iv_phys_base;
+
+ struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
+ dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
+
+ struct ablkcipher_request *req;
+ size_t total;
+ struct scatterlist *in_sg;
+ unsigned int nb_in_sg;
+ struct scatterlist *out_sg;
+ unsigned int nb_out_sg;
+
+ u32 error;
+ struct timer_list watchdog;
+};
+
+static struct sahara_dev *dev_ptr;
+
+static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
+{
+ writel(data, dev->regs_base + reg);
+}
+
+static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
+{
+ return readl(dev->regs_base + reg);
+}
+
+static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
+{
+ u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
+ SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
+ SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
+
+ if (dev->flags & FLAGS_CBC) {
+ hdr |= SAHARA_HDR_SKHA_MODE_CBC;
+ hdr ^= SAHARA_HDR_PARITY_BIT;
+ }
+
+ if (dev->flags & FLAGS_ENCRYPT) {
+ hdr |= SAHARA_HDR_SKHA_OP_ENC;
+ hdr ^= SAHARA_HDR_PARITY_BIT;
+ }
+
+ return hdr;
+}
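+
+/*
+ * The base header built above has an odd number of bits set; each optional
+ * bit OR'ed in (CBC mode, encrypt operation) is paired with an XOR of the
+ * parity bit so the header keeps odd parity, which the SAHARA unit is
+ * assumed to check on every descriptor header.
+ */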
+
+static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
+{
+ return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
+ SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
+}
+
+static int sahara_sg_length(struct scatterlist *sg,
+ unsigned int total)
+{
+ int sg_nb;
+ unsigned int len;
+ struct scatterlist *sg_list;
+
+ sg_nb = 0;
+ sg_list = sg;
+
+ while (total) {
+ len = min(sg_list->length, total);
+
+ sg_nb++;
+ total -= len;
+
+ sg_list = sg_next(sg_list);
+ if (!sg_list)
+ total = 0;
+ }
+
+ return sg_nb;
+}
+
+static char *sahara_err_src[16] = {
+ "No error",
+ "Header error",
+ "Descriptor length error",
+ "Descriptor length or pointer error",
+ "Link length error",
+ "Link pointer error",
+ "Input buffer error",
+ "Output buffer error",
+ "Output buffer starvation",
+ "Internal state fault",
+ "General descriptor problem",
+ "Reserved",
+ "Descriptor address error",
+ "Link address error",
+ "CHA error",
+ "DMA error"
+};
+
+static char *sahara_err_dmasize[4] = {
+ "Byte transfer",
+ "Half-word transfer",
+ "Word transfer",
+ "Reserved"
+};
+
+static char *sahara_err_dmasrc[8] = {
+ "No error",
+ "AHB bus error",
+ "Internal IP bus error",
+ "Parity error",
+ "DMA crosses 256 byte boundary",
+ "DMA is busy",
+ "Reserved",
+ "DMA HW error"
+};
+
+static char *sahara_cha_errsrc[12] = {
+ "Input buffer non-empty",
+ "Illegal address",
+ "Illegal mode",
+ "Illegal data size",
+ "Illegal key size",
+ "Write during processing",
+ "CTX read during processing",
+ "HW error",
+ "Input buffer disabled/underflow",
+ "Output buffer disabled/overflow",
+ "DES key parity error",
+ "Reserved"
+};
+
+static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
+
+static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
+{
+ u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
+ u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
+
+ dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
+
+ dev_err(dev->device, " - %s.\n", sahara_err_src[source]);
+
+ if (source == SAHARA_ERRSOURCE_DMA) {
+ if (error & SAHARA_ERRSTATUS_DMA_DIR)
+ dev_err(dev->device, " * DMA read.\n");
+ else
+ dev_err(dev->device, " * DMA write.\n");
+
+ dev_err(dev->device, " * %s.\n",
+ sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
+ dev_err(dev->device, " * %s.\n",
+ sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
+ } else if (source == SAHARA_ERRSOURCE_CHA) {
+ dev_err(dev->device, " * %s.\n",
+ sahara_cha_errsrc[chasrc]);
+ dev_err(dev->device, " * %s.\n",
+ sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
+ }
+ dev_err(dev->device, "\n");
+}
+
+static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
+
+static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
+{
+ u8 state;
+
+ if (!IS_ENABLED(DEBUG))
+ return;
+
+ state = SAHARA_STATUS_GET_STATE(status);
+
+ dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
+ __func__, status);
+
+ dev_dbg(dev->device, " - State = %d:\n", state);
+ if (state & SAHARA_STATE_COMP_FLAG)
+ dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");
+
+ dev_dbg(dev->device, " * %s.\n",
+ sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
+
+ if (status & SAHARA_STATUS_DAR_FULL)
+ dev_dbg(dev->device, " - DAR Full.\n");
+ if (status & SAHARA_STATUS_ERROR)
+ dev_dbg(dev->device, " - Error.\n");
+ if (status & SAHARA_STATUS_SECURE)
+ dev_dbg(dev->device, " - Secure.\n");
+ if (status & SAHARA_STATUS_FAIL)
+ dev_dbg(dev->device, " - Fail.\n");
+ if (status & SAHARA_STATUS_RNG_RESEED)
+ dev_dbg(dev->device, " - RNG Reseed Request.\n");
+ if (status & SAHARA_STATUS_ACTIVE_RNG)
+ dev_dbg(dev->device, " - RNG Active.\n");
+ if (status & SAHARA_STATUS_ACTIVE_MDHA)
+ dev_dbg(dev->device, " - MDHA Active.\n");
+ if (status & SAHARA_STATUS_ACTIVE_SKHA)
+ dev_dbg(dev->device, " - SKHA Active.\n");
+
+ if (status & SAHARA_STATUS_MODE_BATCH)
+ dev_dbg(dev->device, " - Batch Mode.\n");
+ else if (status & SAHARA_STATUS_MODE_DEDICATED)
+ dev_dbg(dev->device, " - Dedicated Mode.\n");
+ else if (status & SAHARA_STATUS_MODE_DEBUG)
+ dev_dbg(dev->device, " - Debug Mode.\n");
+
+ dev_dbg(dev->device, " - Internal state = 0x%02x\n",
+ SAHARA_STATUS_GET_ISTATE(status));
+
+ dev_dbg(dev->device, "Current DAR: 0x%08x\n",
+ sahara_read(dev, SAHARA_REG_CDAR));
+ dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
+ sahara_read(dev, SAHARA_REG_IDAR));
+}
+
+static void sahara_dump_descriptors(struct sahara_dev *dev)
+{
+ int i;
+
+ if (!IS_ENABLED(DEBUG))
+ return;
+
+ for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
+ dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
+ i, dev->hw_phys_desc[i]);
+ dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
+ dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
+ dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
+ dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
+ dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
+ dev_dbg(dev->device, "\tnext = 0x%08x\n",
+ dev->hw_desc[i]->next);
+ }
+ dev_dbg(dev->device, "\n");
+}
+
+static void sahara_dump_links(struct sahara_dev *dev)
+{
+ int i;
+
+ if (!IS_ENABLED(DEBUG))
+ return;
+
+ for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
+ dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
+ i, dev->hw_phys_link[i]);
+ dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
+ dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
+ dev_dbg(dev->device, "\tnext = 0x%08x\n",
+ dev->hw_link[i]->next);
+ }
+ dev_dbg(dev->device, "\n");
+}
+
+static void sahara_aes_done_task(unsigned long data)
+{
+ struct sahara_dev *dev = (struct sahara_dev *)data;
+
+ dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+
+ spin_lock(&dev->lock);
+ clear_bit(FLAGS_BUSY, &dev->flags);
+ spin_unlock(&dev->lock);
+
+ dev->req->base.complete(&dev->req->base, dev->error);
+}
+
+static void sahara_watchdog(unsigned long data)
+{
+ struct sahara_dev *dev = (struct sahara_dev *)data;
+ unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
+ unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
+
+ sahara_decode_status(dev, stat);
+ sahara_decode_error(dev, err);
+ dev->error = -ETIMEDOUT;
+ sahara_aes_done_task(data);
+}
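+
+/*
+ * The watchdog is armed in sahara_hw_descriptor_create() right before the
+ * descriptor chain address is written to DAR and deleted again in the IRQ
+ * handler. If it fires, the status and error registers are decoded and the
+ * pending request is failed with -ETIMEDOUT through sahara_aes_done_task().
+ */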
+
+static int sahara_hw_descriptor_create(struct sahara_dev *dev)
+{
+ struct sahara_ctx *ctx = dev->ctx;
+ struct scatterlist *sg;
+ int ret;
+ int i, j;
+
+ /* Copy new key if necessary */
+ if (ctx->flags & FLAGS_NEW_KEY) {
+ memcpy(dev->key_base, ctx->key, ctx->keylen);
+ ctx->flags &= ~FLAGS_NEW_KEY;
+
+ if (dev->flags & FLAGS_CBC) {
+ dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
+ dev->hw_desc[0]->p1 = dev->iv_phys_base;
+ } else {
+ dev->hw_desc[0]->len1 = 0;
+ dev->hw_desc[0]->p1 = 0;
+ }
+ dev->hw_desc[0]->len2 = ctx->keylen;
+ dev->hw_desc[0]->p2 = dev->key_phys_base;
+ dev->hw_desc[0]->next = dev->hw_phys_desc[1];
+ }
+ dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);
+
+ dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
+ dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
+ if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
+ dev_err(dev->device, "not enough hw links (%d)\n",
+ dev->nb_in_sg + dev->nb_out_sg);
+ return -EINVAL;
+ }
+
+ ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+ if (ret != dev->nb_in_sg) {
+ dev_err(dev->device, "couldn't map in sg\n");
+ goto unmap_in;
+ }
+ ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_FROM_DEVICE);
+ if (ret != dev->nb_out_sg) {
+ dev_err(dev->device, "couldn't map out sg\n");
+ goto unmap_out;
+ }
+
+ /* Create input links */
+ dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
+ sg = dev->in_sg;
+ for (i = 0; i < dev->nb_in_sg; i++) {
+ dev->hw_link[i]->len = sg->length;
+ dev->hw_link[i]->p = sg->dma_address;
+ if (i == (dev->nb_in_sg - 1)) {
+ dev->hw_link[i]->next = 0;
+ } else {
+ dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+ sg = sg_next(sg);
+ }
+ }
+
+ /* Create output links */
+ dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
+ sg = dev->out_sg;
+ for (j = i; j < dev->nb_out_sg + i; j++) {
+ dev->hw_link[j]->len = sg->length;
+ dev->hw_link[j]->p = sg->dma_address;
+ if (j == (dev->nb_out_sg + i - 1)) {
+ dev->hw_link[j]->next = 0;
+ } else {
+ dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
+ sg = sg_next(sg);
+ }
+ }
+
+ /* Fill remaining fields of hw_desc[1] */
+ dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
+ dev->hw_desc[1]->len1 = dev->total;
+ dev->hw_desc[1]->len2 = dev->total;
+ dev->hw_desc[1]->next = 0;
+
+ sahara_dump_descriptors(dev);
+ sahara_dump_links(dev);
+
+ /* Start processing descriptor chain. */
+ mod_timer(&dev->watchdog,
+ jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+ sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
+
+ return 0;
+
+unmap_out:
+ dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+ DMA_FROM_DEVICE);
+unmap_in:
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
+
+ return -EINVAL;
+}
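+
+/*
+ * Descriptor chain built above: hw_desc[0] points at the IV (CBC only) and
+ * key buffers and is only rebuilt when a new key has been set; hw_desc[1]
+ * points its p1/p2 fields at the input and output link lists generated from
+ * the scatterlists. Writing hw_phys_desc[0] to DAR hands the whole chain to
+ * the engine in one go.
+ */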
+
+static void sahara_aes_queue_task(unsigned long data)
+{
+ struct sahara_dev *dev = (struct sahara_dev *)data;
+ struct crypto_async_request *async_req, *backlog;
+ struct sahara_ctx *ctx;
+ struct sahara_aes_reqctx *rctx;
+ struct ablkcipher_request *req;
+ int ret;
+
+ spin_lock(&dev->lock);
+ backlog = crypto_get_backlog(&dev->queue);
+ async_req = crypto_dequeue_request(&dev->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &dev->flags);
+ spin_unlock(&dev->lock);
+
+ if (!async_req)
+ return;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+
+ /* Request is ready to be dispatched by the device */
+ dev_dbg(dev->device,
+ "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
+ req->nbytes, req->src, req->dst);
+
+ /* assign new request to device */
+ dev->req = req;
+ dev->total = req->nbytes;
+ dev->in_sg = req->src;
+ dev->out_sg = req->dst;
+
+ rctx = ablkcipher_request_ctx(req);
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx->mode &= FLAGS_MODE_MASK;
+ dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+ if ((dev->flags & FLAGS_CBC) && req->info)
+ memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
+
+ /* assign new context to device */
+ ctx->dev = dev;
+ dev->ctx = ctx;
+
+ ret = sahara_hw_descriptor_create(dev);
+ if (ret < 0) {
+ spin_lock(&dev->lock);
+ clear_bit(FLAGS_BUSY, &dev->flags);
+ spin_unlock(&dev->lock);
+ dev->req->base.complete(&dev->req->base, ret);
+ }
+}
+
+static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int ret;
+
+ ctx->keylen = keylen;
+
+ /* SAHARA only supports 128bit keys */
+ if (keylen == AES_KEYSIZE_128) {
+ memcpy(ctx->key, key, keylen);
+ ctx->flags |= FLAGS_NEW_KEY;
+ return 0;
+ }
+
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ /*
+ * The requested key size is not supported by HW, do a fallback.
+ */
+ ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ ctx->fallback->base.crt_flags |=
+ (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
+ if (ret) {
+ struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
+
+ tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm_aux->crt_flags |=
+ (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+}
+
+static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct sahara_dev *dev = dev_ptr;
+ int err = 0;
+ int busy;
+
+ dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
+ req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
+
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ dev_err(dev->device,
+ "request size is not exact amount of AES blocks\n");
+ return -EINVAL;
+ }
+
+ ctx->dev = dev;
+
+ rctx->mode = mode;
+ spin_lock_bh(&dev->lock);
+ err = ablkcipher_enqueue_request(&dev->queue, req);
+ busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
+ spin_unlock_bh(&dev->lock);
+
+ if (!busy)
+ tasklet_schedule(&dev->queue_task);
+
+ return err;
+}
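+
+/*
+ * SAHARA_QUEUE_LENGTH is 1, so the engine handles a single request at a
+ * time. FLAGS_BUSY guards the hardware: when it was clear, the queue
+ * tasklet scheduled here dequeues and dispatches the request that was just
+ * enqueued.
+ */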
+
+static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ int err;
+
+ if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+ err = crypto_ablkcipher_encrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return err;
+ }
+
+ return sahara_aes_crypt(req, FLAGS_ENCRYPT);
+}
+
+static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ int err;
+
+ if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+ err = crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return err;
+ }
+
+ return sahara_aes_crypt(req, 0);
+}
+
+static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ int err;
+
+ if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+ err = crypto_ablkcipher_encrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return err;
+ }
+
+ return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
+}
+
+static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_tfm *tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ int err;
+
+ if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
+ ablkcipher_request_set_tfm(req, ctx->fallback);
+ err = crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ return err;
+ }
+
+ return sahara_aes_crypt(req, FLAGS_CBC);
+}
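+
+/*
+ * The four entry points above share one pattern: 128-bit keys are handled
+ * by the hardware, while 192/256-bit keys are redirected to the software
+ * fallback ablkcipher allocated in sahara_aes_cra_init(), with the request
+ * tfm swapped in and out around the call.
+ */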
+
+static int sahara_aes_cra_init(struct crypto_tfm *tfm)
+{
+ const char *name = tfm->__crt_alg->cra_name;
+ struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->fallback = crypto_alloc_ablkcipher(name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback)) {
+ pr_err("Error allocating fallback algo %s\n", name);
+ return PTR_ERR(ctx->fallback);
+ }
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
+
+ return 0;
+}
+
+static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
+{
+ struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback)
+ crypto_free_ablkcipher(ctx->fallback);
+ ctx->fallback = NULL;
+}
+
+static struct crypto_alg aes_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "sahara-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_aes_cra_init,
+ .cra_exit = sahara_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_ecb_encrypt,
+ .decrypt = sahara_aes_ecb_decrypt,
+ }
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "sahara-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_aes_cra_init,
+ .cra_exit = sahara_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_cbc_encrypt,
+ .decrypt = sahara_aes_cbc_decrypt,
+ }
+}
+};
+
+static irqreturn_t sahara_irq_handler(int irq, void *data)
+{
+ struct sahara_dev *dev = (struct sahara_dev *)data;
+ unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
+ unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
+
+ del_timer(&dev->watchdog);
+
+ sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
+ SAHARA_REG_CMD);
+
+ sahara_decode_status(dev, stat);
+
+ if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
+ return IRQ_NONE;
+ } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
+ dev->error = 0;
+ } else {
+ sahara_decode_error(dev, err);
+ dev->error = -EINVAL;
+ }
+
+ tasklet_schedule(&dev->done_task);
+
+ return IRQ_HANDLED;
+}
+
+
+static int sahara_register_algs(struct sahara_dev *dev)
+{
+ int err, i, j;
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ INIT_LIST_HEAD(&aes_algs[i].cra_list);
+ err = crypto_register_alg(&aes_algs[i]);
+ if (err)
+ goto err_aes_algs;
+ }
+
+ return 0;
+
+err_aes_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_alg(&aes_algs[j]);
+
+ return err;
+}
+
+static void sahara_unregister_algs(struct sahara_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
+ crypto_unregister_alg(&aes_algs[i]);
+}
+
+static struct platform_device_id sahara_platform_ids[] = {
+ { .name = "sahara-imx27" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
+
+static const struct of_device_id sahara_dt_ids[] = {
+ { .compatible = "fsl,imx27-sahara" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sahara_dt_ids);
+
+static int sahara_probe(struct platform_device *pdev)
+{
+ struct sahara_dev *dev;
+ struct resource *res;
+ u32 version;
+ int irq;
+ int err;
+ int i;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&pdev->dev, "unable to alloc data struct.\n");
+ return -ENOMEM;
+ }
+
+ dev->device = &pdev->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /* Get the base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory region resource\n");
+ return -ENODEV;
+ }
+
+ if (devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), SAHARA_NAME) == NULL) {
+ dev_err(&pdev->dev, "failed to request memory region\n");
+ return -ENOENT;
+ }
+ dev->regs_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!dev->regs_base) {
+ dev_err(&pdev->dev, "failed to ioremap address region\n");
+ return -ENOENT;
+ }
+
+ /* Get the IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return irq;
+ }
+
+ if (devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
+ 0, SAHARA_NAME, dev) < 0) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return -ENOENT;
+ }
+
+ /* clocks */
+ dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(dev->clk_ipg)) {
+ dev_err(&pdev->dev, "Could not get ipg clock\n");
+ return PTR_ERR(dev->clk_ipg);
+ }
+
+ dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(dev->clk_ahb)) {
+ dev_err(&pdev->dev, "Could not get ahb clock\n");
+ return PTR_ERR(dev->clk_ahb);
+ }
+
+ /* Allocate HW descriptors */
+ dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
+ SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
+ &dev->hw_phys_desc[0], GFP_KERNEL);
+ if (!dev->hw_desc[0]) {
+ dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
+ return -ENOMEM;
+ }
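+ /* The second descriptor follows the first in the same coherent allocation */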
+ dev->hw_desc[1] = dev->hw_desc[0] + 1;
+ dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
+ sizeof(struct sahara_hw_desc);
+
+ /* Allocate space for iv and key */
+ dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
+ &dev->key_phys_base, GFP_KERNEL);
+ if (!dev->key_base) {
+ dev_err(&pdev->dev, "Could not allocate memory for key\n");
+ err = -ENOMEM;
+ goto err_key;
+ }
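+ /* The IV area follows the 128-bit key within the same buffer */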
+ dev->iv_base = dev->key_base + AES_KEYSIZE_128;
+ dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
+
+ /* Allocate space for HW links */
+ dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
+ SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
+ &dev->hw_phys_link[0], GFP_KERNEL);
+ if (!dev->hw_link[0]) {
+ dev_err(&pdev->dev, "Could not allocate hw links\n");
+ err = -ENOMEM;
+ goto err_link;
+ }
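+ /* Chain the remaining link entries inside the single coherent allocation */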
+ for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
+ dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
+ sizeof(struct sahara_hw_link);
+ dev->hw_link[i] = dev->hw_link[i - 1] + 1;
+ }
+
+ crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
+
+ dev_ptr = dev;
+
+ tasklet_init(&dev->queue_task, sahara_aes_queue_task,
+ (unsigned long)dev);
+ tasklet_init(&dev->done_task, sahara_aes_done_task,
+ (unsigned long)dev);
+
+ init_timer(&dev->watchdog);
+ dev->watchdog.function = &sahara_watchdog;
+ dev->watchdog.data = (unsigned long)dev;
+
+ clk_prepare_enable(dev->clk_ipg);
+ clk_prepare_enable(dev->clk_ahb);
+
+ version = sahara_read(dev, SAHARA_REG_VERSION);
+ if (version != SAHARA_VERSION_3) {
+ dev_err(&pdev->dev, "SAHARA version %d not supported\n",
+ version);
+ err = -ENODEV;
+ goto err_algs;
+ }
+
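+ /* Reset the engine in batch mode, then program burst size, throttle and interrupts */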
+ sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
+ SAHARA_REG_CMD);
+ sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
+ SAHARA_CONTROL_SET_MAXBURST(8) |
+ SAHARA_CONTROL_RNG_AUTORSD |
+ SAHARA_CONTROL_ENABLE_INT,
+ SAHARA_REG_CONTROL);
+
+ err = sahara_register_algs(dev);
+ if (err)
+ goto err_algs;
+
+ dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
+
+ return 0;
+
+err_algs:
+ dma_free_coherent(&pdev->dev,
+ SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
+ dev->hw_link[0], dev->hw_phys_link[0]);
+ clk_disable_unprepare(dev->clk_ipg);
+ clk_disable_unprepare(dev->clk_ahb);
+ dev_ptr = NULL;
+err_link:
+ dma_free_coherent(&pdev->dev,
+ 2 * AES_KEYSIZE_128,
+ dev->key_base, dev->key_phys_base);
+err_key:
+ dma_free_coherent(&pdev->dev,
+ SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
+ dev->hw_desc[0], dev->hw_phys_desc[0]);
+
+ return err;
+}
+
+static int sahara_remove(struct platform_device *pdev)
+{
+ struct sahara_dev *dev = platform_get_drvdata(pdev);
+
+ sahara_unregister_algs(dev);
+
+ tasklet_kill(&dev->done_task);
+ tasklet_kill(&dev->queue_task);
+
+ dma_free_coherent(&pdev->dev,
+ SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
+ dev->hw_link[0], dev->hw_phys_link[0]);
+ dma_free_coherent(&pdev->dev,
+ 2 * AES_KEYSIZE_128,
+ dev->key_base, dev->key_phys_base);
+ dma_free_coherent(&pdev->dev,
+ SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
+ dev->hw_desc[0], dev->hw_phys_desc[0]);
+
+ clk_disable_unprepare(dev->clk_ipg);
+ clk_disable_unprepare(dev->clk_ahb);
+
+ dev_ptr = NULL;
+
+ return 0;
+}
+
+static struct platform_driver sahara_driver = {
+ .probe = sahara_probe,
+ .remove = sahara_remove,
+ .driver = {
+ .name = SAHARA_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sahara_dt_ids),
+ },
+ .id_table = sahara_platform_ids,
+};
+
+module_platform_driver(sahara_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 1827e9f1f87..cf550896753 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -938,6 +938,7 @@ static int hash_dma_final(struct ahash_request *req)
if (!ctx->device->dma.nents) {
dev_err(device_data->dev, "[%s] "
"ctx->device->dma.nents = 0", __func__);
+ ret = ctx->device->dma.nents;
goto out;
}
@@ -945,6 +946,7 @@ static int hash_dma_final(struct ahash_request *req)
if (bytes_written != req->nbytes) {
dev_err(device_data->dev, "[%s] "
"hash_dma_write() failed!", __func__);
+ ret = bytes_written;
goto out;
}
@@ -1367,14 +1369,12 @@ static int hash_setkey(struct crypto_ahash *tfm,
/**
* Freed in final.
*/
- ctx->key = kmalloc(keylen, GFP_KERNEL);
+ ctx->key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key) {
pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
"for %d\n", __func__, alg);
return -ENOMEM;
}
-
- memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
return ret;