Commit 904e0ab5 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  [HWRNG] omap: Minor updates
  [CRYPTO] kconfig: Ordering cleanup
  [CRYPTO] all: Clean up init()/fini()
  [CRYPTO] padlock-aes: Use generic setkey function
  [CRYPTO] aes: Export generic setkey
  [CRYPTO] api: Make the crypto subsystem fully modular
  [CRYPTO] cts: Add CTS mode required for Kerberos AES support
  [CRYPTO] lrw: Replace all adds to big endian variables with be*_add_cpu
  [CRYPTO] tcrypt: Change the XTEA test vectors
  [CRYPTO] tcrypt: Shrink the tcrypt module
  [CRYPTO] tcrypt: Change the usage of the test vectors
  [CRYPTO] api: Constify function pointer tables
  [CRYPTO] aes-x86-32: Remove unused return code
  [CRYPTO] tcrypt: Shrink speed templates
  [CRYPTO] tcrypt: Group common speed templates
  [CRYPTO] sha512: Rename sha512 to sha512_generic
  [CRYPTO] sha384: Hardware acceleration for s390
  [CRYPTO] sha512: Hardware acceleration for s390
  [CRYPTO] s390: Generic sha_update and sha_final
  [CRYPTO] api: Switch to proc_create()
parents 98a1e95f c49a7f18
......@@ -2,8 +2,9 @@
# Cryptographic API
#
obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o
obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o
obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o
obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
......@@ -82,6 +82,7 @@ enum crypt_s390_kimd_func {
KIMD_QUERY = CRYPT_S390_KIMD | 0,
KIMD_SHA_1 = CRYPT_S390_KIMD | 1,
KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
KIMD_SHA_512 = CRYPT_S390_KIMD | 3,
};
/*
......@@ -92,6 +93,7 @@ enum crypt_s390_klmd_func {
KLMD_QUERY = CRYPT_S390_KLMD | 0,
KLMD_SHA_1 = CRYPT_S390_KLMD | 1,
KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
KLMD_SHA_512 = CRYPT_S390_KLMD | 3,
};
/*
/*
* Cryptographic API.
*
* s390 generic implementation of the SHA Secure Hash Algorithms.
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_ARCH_S390_SHA_H
#define _CRYPTO_ARCH_S390_SHA_H
#include <linux/crypto.h>
#include <crypto/sha.h>
/* must be big enough for the largest SHA variant */
#define SHA_MAX_STATE_SIZE 16
#define SHA_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
struct s390_sha_ctx {
u64 count; /* message length in bytes */
u32 state[SHA_MAX_STATE_SIZE];
u8 buf[2 * SHA_MAX_BLOCK_SIZE];
int func; /* KIMD function to use */
};
void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len);
void s390_sha_final(struct crypto_tfm *tfm, u8 *out);
#endif
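As a side note on the sizes above (illustration only, not part of the commit): SHA_MAX_STATE_SIZE is 16 u32 words, i.e. 64 bytes, which exactly covers SHA-512's eight 64-bit chaining values, and the 2 * SHA_MAX_BLOCK_SIZE buffer leaves room for a partial block plus the manual padding appended in s390_sha_final(). A hedged compile-time sketch of those two facts, with the helper name chosen purely for illustration:

/* illustrative only -- not in the commit */
#include <linux/kernel.h>
#include <crypto/sha.h>

static inline void s390_sha_ctx_size_check(void)
{
	/* 16 * 4 bytes of u32 state hold SHA-512's 64 bytes of chaining state */
	BUILD_BUG_ON(16 * sizeof(u32) < SHA512_DIGEST_SIZE);
	/* the double-sized buffer fits one block, the 0x80 byte and the length */
	BUILD_BUG_ON(2 * SHA512_BLOCK_SIZE < SHA512_BLOCK_SIZE + 1 + 16);
}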
......@@ -29,16 +29,11 @@
#include <crypto/sha.h>
#include "crypt_s390.h"
struct s390_sha1_ctx {
u64 count; /* message length */
u32 state[5];
u8 buf[2 * SHA1_BLOCK_SIZE];
};
#include "sha.h"
static void sha1_init(struct crypto_tfm *tfm)
{
struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->state[0] = SHA1_H0;
sctx->state[1] = SHA1_H1;
......@@ -46,79 +41,7 @@ static void sha1_init(struct crypto_tfm *tfm)
sctx->state[3] = SHA1_H3;
sctx->state[4] = SHA1_H4;
sctx->count = 0;
}
static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned int index;
int ret;
/* how much is already in the buffer? */
index = sctx->count & 0x3f;
sctx->count += len;
if (index + len < SHA1_BLOCK_SIZE)
goto store;
/* process one stored block */
if (index) {
memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index);
ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf,
SHA1_BLOCK_SIZE);
BUG_ON(ret != SHA1_BLOCK_SIZE);
data += SHA1_BLOCK_SIZE - index;
len -= SHA1_BLOCK_SIZE - index;
}
/* process as many blocks as possible */
if (len >= SHA1_BLOCK_SIZE) {
ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data,
len & ~(SHA1_BLOCK_SIZE - 1));
BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1)));
data += ret;
len -= ret;
}
store:
/* anything left? */
if (len)
memcpy(sctx->buf + index , data, len);
}
/* Add padding and return the message digest. */
static void sha1_final(struct crypto_tfm *tfm, u8 *out)
{
struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
u64 bits;
unsigned int index, end;
int ret;
/* must perform manual padding */
index = sctx->count & 0x3f;
end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE);
/* start pad with 1 */
sctx->buf[index] = 0x80;
/* pad with zeros */
index++;
memset(sctx->buf + index, 0x00, end - index - 8);
/* append message length */
bits = sctx->count * 8;
memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end);
BUG_ON(ret != end);
/* copy digest to out */
memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
/* wipe context */
memset(sctx, 0, sizeof *sctx);
sctx->func = KIMD_SHA_1;
}
static struct crypto_alg alg = {
......@@ -127,21 +50,20 @@ static struct crypto_alg alg = {
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha1_ctx),
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = SHA1_DIGEST_SIZE,
.dia_init = sha1_init,
.dia_update = sha1_update,
.dia_final = sha1_final } }
.dia_update = s390_sha_update,
.dia_final = s390_sha_final } }
};
static int __init sha1_s390_init(void)
{
if (!crypt_s390_func_available(KIMD_SHA_1))
return -EOPNOTSUPP;
return crypto_register_alg(&alg);
}
......@@ -154,6 +76,5 @@ module_init(sha1_s390_init);
module_exit(sha1_s390_fini);
MODULE_ALIAS("sha1");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
......@@ -22,16 +22,11 @@
#include <crypto/sha.h>
#include "crypt_s390.h"
struct s390_sha256_ctx {
u64 count; /* message length */
u32 state[8];
u8 buf[2 * SHA256_BLOCK_SIZE];
};
#include "sha.h"
static void sha256_init(struct crypto_tfm *tfm)
{
struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
......@@ -42,79 +37,7 @@ static void sha256_init(struct crypto_tfm *tfm)
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
sctx->count = 0;
}
static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int len)
{
struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned int index;
int ret;
/* how much is already in the buffer? */
index = sctx->count & 0x3f;
sctx->count += len;
if ((index + len) < SHA256_BLOCK_SIZE)
goto store;
/* process one stored block */
if (index) {
memcpy(sctx->buf + index, data, SHA256_BLOCK_SIZE - index);
ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
SHA256_BLOCK_SIZE);
BUG_ON(ret != SHA256_BLOCK_SIZE);
data += SHA256_BLOCK_SIZE - index;
len -= SHA256_BLOCK_SIZE - index;
}
/* process as many blocks as possible */
if (len >= SHA256_BLOCK_SIZE) {
ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, data,
len & ~(SHA256_BLOCK_SIZE - 1));
BUG_ON(ret != (len & ~(SHA256_BLOCK_SIZE - 1)));
data += ret;
len -= ret;
}
store:
/* anything left? */
if (len)
memcpy(sctx->buf + index , data, len);
}
/* Add padding and return the message digest */
static void sha256_final(struct crypto_tfm *tfm, u8 *out)
{
struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
u64 bits;
unsigned int index, end;
int ret;
/* must perform manual padding */
index = sctx->count & 0x3f;
end = (index < 56) ? SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE);
/* start pad with 1 */
sctx->buf[index] = 0x80;
/* pad with zeros */
index++;
memset(sctx->buf + index, 0x00, end - index - 8);
/* append message length */
bits = sctx->count * 8;
memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end);
BUG_ON(ret != end);
/* copy digest to out */
memcpy(out, sctx->state, SHA256_DIGEST_SIZE);
/* wipe context */
memset(sctx, 0, sizeof *sctx);
sctx->func = KIMD_SHA_256;
}
static struct crypto_alg alg = {
......@@ -123,14 +46,14 @@ static struct crypto_alg alg = {
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha256_ctx),
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = SHA256_DIGEST_SIZE,
.dia_init = sha256_init,
.dia_update = sha256_update,
.dia_final = sha256_final } }
.dia_update = s390_sha_update,
.dia_final = s390_sha_final } }
};
static int sha256_s390_init(void)
......@@ -150,6 +73,5 @@ module_init(sha256_s390_init);
module_exit(sha256_s390_fini);
MODULE_ALIAS("sha256");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
/*
* Cryptographic API.
*
* s390 implementation of the SHA512 and SHA384 Secure Hash Algorithms.
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include "sha.h"
#include "crypt_s390.h"
static void sha512_init(struct crypto_tfm *tfm)
{
struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
*(__u64 *)&ctx->state[0] = 0x6a09e667f3bcc908ULL;
*(__u64 *)&ctx->state[2] = 0xbb67ae8584caa73bULL;
*(__u64 *)&ctx->state[4] = 0x3c6ef372fe94f82bULL;
*(__u64 *)&ctx->state[6] = 0xa54ff53a5f1d36f1ULL;
*(__u64 *)&ctx->state[8] = 0x510e527fade682d1ULL;
*(__u64 *)&ctx->state[10] = 0x9b05688c2b3e6c1fULL;
*(__u64 *)&ctx->state[12] = 0x1f83d9abfb41bd6bULL;
*(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL;
ctx->count = 0;
ctx->func = KIMD_SHA_512;
}
static struct crypto_alg sha512_alg = {
.cra_name = "sha512",
.cra_driver_name = "sha512-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha512_alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = SHA512_DIGEST_SIZE,
.dia_init = sha512_init,
.dia_update = s390_sha_update,
.dia_final = s390_sha_final } }
};
MODULE_ALIAS("sha512");
static void sha384_init(struct crypto_tfm *tfm)
{
struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
*(__u64 *)&ctx->state[0] = 0xcbbb9d5dc1059ed8ULL;
*(__u64 *)&ctx->state[2] = 0x629a292a367cd507ULL;
*(__u64 *)&ctx->state[4] = 0x9159015a3070dd17ULL;
*(__u64 *)&ctx->state[6] = 0x152fecd8f70e5939ULL;
*(__u64 *)&ctx->state[8] = 0x67332667ffc00b31ULL;
*(__u64 *)&ctx->state[10] = 0x8eb44a8768581511ULL;
*(__u64 *)&ctx->state[12] = 0xdb0c2e0d64f98fa7ULL;
*(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL;
ctx->count = 0;
ctx->func = KIMD_SHA_512;
}
static struct crypto_alg sha384_alg = {
.cra_name = "sha384",
.cra_driver_name = "sha384-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha384_alg.cra_list),
.cra_u = { .digest = {
.dia_digestsize = SHA384_DIGEST_SIZE,
.dia_init = sha384_init,
.dia_update = s390_sha_update,
.dia_final = s390_sha_final } }
};
MODULE_ALIAS("sha384");
static int __init init(void)
{
int ret;
if (!crypt_s390_func_available(KIMD_SHA_512))
return -EOPNOTSUPP;
if ((ret = crypto_register_alg(&sha512_alg)) < 0)
goto out;
if ((ret = crypto_register_alg(&sha384_alg)) < 0)
crypto_unregister_alg(&sha512_alg);
out:
return ret;
}
static void __exit fini(void)
{
crypto_unregister_alg(&sha512_alg);
crypto_unregister_alg(&sha384_alg);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 and SHA-384 Secure Hash Algorithm");
/*
* Cryptographic API.
*
* s390 generic implementation of the SHA Secure Hash Algorithms.
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/crypto.h>
#include "sha.h"
#include "crypt_s390.h"
void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
{
struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
unsigned int index;
int ret;
/* how much is already in the buffer? */
index = ctx->count & (bsize - 1);
ctx->count += len;
if ((index + len) < bsize)
goto store;
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
BUG_ON(ret != bsize);
data += bsize - index;
len -= bsize - index;
}
/* process as many blocks as possible */
if (len >= bsize) {
ret = crypt_s390_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1));
BUG_ON(ret != (len & ~(bsize - 1)));
data += ret;
len -= ret;
}
store:
if (len)
memcpy(ctx->buf + index , data, len);
}
EXPORT_SYMBOL_GPL(s390_sha_update);
void s390_sha_final(struct crypto_tfm *tfm, u8 *out)
{
struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
u64 bits;
unsigned int index, end, plen;
int ret;
/* SHA-512 uses 128 bit padding length */
plen = (bsize > SHA256_BLOCK_SIZE) ? 16 : 8;
/* must perform manual padding */
index = ctx->count & (bsize - 1);
end = (index < bsize - plen) ? bsize : (2 * bsize);
/* start pad with 1 */
ctx->buf[index] = 0x80;
index++;
/* pad with zeros */
memset(ctx->buf + index, 0x00, end - index - 8);
/*
 * Append the message length. SHA-512 actually wants a 128-bit length
 * value, but we only use a u64, which is enough for now.
 */
bits = ctx->count * 8;
memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
BUG_ON(ret != end);
/* copy digest to out */
memcpy(out, ctx->state, crypto_hash_digestsize(crypto_hash_cast(tfm)));
/* wipe context */
memset(ctx, 0, sizeof *ctx);
}
EXPORT_SYMBOL_GPL(s390_sha_final);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s390 SHA cipher common functions");
......@@ -289,7 +289,6 @@ aes_enc_blk:
pop %ebx
mov %r0,(%ebp)
pop %ebp
mov $1,%eax
ret
// AES (Rijndael) Decryption Subroutine
......@@ -365,6 +364,4 @@ aes_dec_blk:
pop %ebx
mov %r0,(%ebp)
pop %ebp
mov $1,%eax
ret
......@@ -2,7 +2,8 @@
# Cryptographic API
#
obj-$(CONFIG_CRYPTO) += api.o cipher.o digest.o compress.o
obj-$(CONFIG_CRYPTO) += crypto.o
crypto-objs := api.o cipher.o digest.o compress.o
crypto_algapi-$(CONFIG_PROC_FS) += proc.o
crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
......@@ -28,13 +29,14 @@ obj-$(CONFIG_CRYPTO_MD4) += md4.o
obj-$(CONFIG_CRYPTO_MD5) += md5.o
obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
obj-$(CONFIG_CRYPTO_CTS) += cts.o
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
......@@ -229,18 +229,29 @@ static void __init gen_tabs(void)
ctx->key_enc[8 * i + 15] = t; \
} while (0)
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
/**
* crypto_aes_expand_key - Expands the AES key as described in FIPS-197
* @ctx: The location where the computed key will be stored.
* @in_key: The supplied key.
* @key_len: The length of the supplied key.
*
* Returns 0 on success. The function fails only if an invalid key size (or
* pointer) is supplied.
* The expanded key size is 240 bytes (a maximum of 14 rounds with a unique
* 16-byte round key each, plus a 16-byte key used before the first round).
* The decryption key is prepared for the "Equivalent Inverse Cipher" as
* described in FIPS-197. The first slot (16 bytes) of each key (enc or dec)
* holds the key for the initial AddRoundKey, the second slot the key for the
* first round, and so on.
*/
int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
u32 *flags = &tfm->crt_flags;
u32 i, t, u, v, w, j;
if (key_len % 8) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
key_len != AES_KEYSIZE_256)
return -EINVAL;
}
ctx->key_length = key_len;
......@@ -250,20 +261,20 @@ int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
ctx->key_dec[key_len + 27] = ctx->key_enc[3] = le32_to_cpu(key[3]);
switch (key_len) {
case 16:
case AES_KEYSIZE_128:
t = ctx->key_enc[3];
for (i = 0; i < 10; ++i)
loop4(i);
break;
case 24:
case AES_KEYSIZE_192:
ctx->key_enc[4] = le32_to_cpu(key[4]);
t = ctx->key_enc[5] = le32_to_cpu(key[5]);
for (i = 0; i < 8; ++i)
loop6(i);
break;
case 32:
case AES_KEYSIZE_256:
ctx->key_enc[4] = le32_to_cpu(key[4]);
ctx->key_enc[5] = le32_to_cpu(key[5]);
ctx->key_enc[6] = le32_to_cpu(key[6]);
......@@ -284,6 +295,33 @@ int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
}
return 0;
}
EXPORT_SYMBOL_GPL(crypto_aes_expand_key);
/**
* crypto_aes_set_key - Set the AES key.
* @tfm: The %crypto_tfm that is used in the context.
* @in_key: The input key.
* @key_len: The size of the key.
*
* Returns 0 on success; on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in @tfm
* is set and -EINVAL is returned. The function uses crypto_aes_expand_key() to
* expand the key.
* &crypto_aes_ctx _must_ be the private data embedded in @tfm which is
* retrieved with crypto_tfm_ctx().
*/
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
int ret;
ret = crypto_aes_expand_key(ctx, in_key, key_len);
if (!ret)
return 0;
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
EXPORT_SYMBOL_GPL(crypto_aes_set_key);
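Exporting crypto_aes_expand_key() lets other AES implementations reuse the generic key schedule instead of duplicating it (the padlock-aes patch in this series does exactly that for its setkey path). A minimal sketch of such a reuse, where my_aes_ctx and my_aes_setkey are made-up names for illustration, not anything in the tree:

#include <linux/crypto.h>
#include <crypto/aes.h>

struct my_aes_ctx {
	struct crypto_aes_ctx gen;	/* generic round keys live here */
	/* ... driver specific state ... */
};

static int my_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct my_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	/* let the generic code validate the key size and expand the key */
	ret = crypto_aes_expand_key(&ctx->gen, in_key, key_len);
	if (ret)
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
	return ret;
}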
/* encrypt a block of text */
......@@ -687,7 +687,7 @@ static struct crypto_alg anubis_alg = {
.cia_decrypt = anubis_decrypt } }
};
static int __init init(void)
static int __init anubis_mod_init(void)
{
int ret = 0;
......@@ -695,13 +695,13 @@ static int __init init(void)
return ret;
}
static void __exit fini(void)
static void __exit anubis_mod_fini(void)
{
crypto_unregister_alg(&anubis_alg);
}
module_init(init);
module_exit(fini);
module_init(anubis_mod_init);
module_exit(anubis_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
......@@ -445,3 +445,6 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
......@@ -465,18 +465,18 @@ static struct crypto_alg alg = {
.cia_decrypt = bf_decrypt } }
};
static int __init init(void)
static int __init blowfish_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit blowfish_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
......@@ -817,18 +817,18 @@ static struct crypto_alg alg = {
}
};
static int __init init(void)
static int __init cast5_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit cast5_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(cast5_mod_init);
module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
......@@ -528,18 +528,18 @@ static struct crypto_alg alg = {
}
};
static int __init init(void)
static int __init cast6_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit cast6_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(cast6_mod_init);
module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
......@@ -98,18 +98,18 @@ static struct crypto_alg alg = {
}
};
static int __init init(void)
static int __init crc32c_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit crc32c_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(crc32c_mod_init);
module_exit(crc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
......@@ -142,7 +142,7 @@ MODULE_ALIAS("compress_null");
MODULE_ALIAS("digest_null");
MODULE_ALIAS("cipher_null");
static int __init init(void)
static int __init crypto_null_mod_init(void)
{
int ret = 0;
......@@ -174,7 +174,7 @@ out_unregister_cipher:
goto out;
}
static void __exit fini(void)
static void __exit crypto_null_mod_fini(void)
{
crypto_unregister_alg(&compress_null);
crypto_unregister_alg(&digest_null);
......@@ -182,8 +182,8 @@ static void __exit fini(void)
crypto_unregister_alg(&cipher_null);
}
module_init(init);
module_exit(fini);
module_init(crypto_null_mod_init);
module_exit(crypto_null_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Null Cryptographic Algorithms");
......@@ -208,18 +208,18 @@ static struct crypto_alg alg = {
.coa_decompress = deflate_decompress } }
};
static int __init init(void)
static int __init deflate_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit deflate_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(deflate_mod_init);
module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
......@@ -977,7 +977,7 @@ static struct crypto_alg des3_ede_alg = {
MODULE_ALIAS("des3_ede");
static int __init init(void)
static int __init des_generic_mod_init(void)
{
int ret = 0;
......@@ -992,14 +992,14 @@ out:
return ret;
}
static void __exit fini(void)
static void __exit des_generic_mod_fini(void)
{
crypto_unregister_alg(&des3_ede_alg);
crypto_unregister_alg(&des_alg);
}
module_init(init);
module_exit(fini);
module_init(des_generic_mod_init);
module_exit(des_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
......@@ -405,18 +405,18 @@ static struct crypto_alg fcrypt_alg = {
.cia_decrypt = fcrypt_decrypt } }
};
static int __init init(void)
static int __init fcrypt_mod_init(void)
{
return crypto_register_alg(&fcrypt_alg);
}
static void __exit fini(void)
static void __exit fcrypt_mod_fini(void)
{
crypto_unregister_alg(&fcrypt_alg);
}
module_init(init);
module_exit(fini);
module_init(fcrypt_mod_init);
module_exit(fcrypt_mod_fini);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
......@@ -862,7 +862,7 @@ static struct crypto_alg khazad_alg = {
.cia_decrypt = khazad_decrypt } }
};
static int __init init(void)
static int __init khazad_mod_init(void)
{
int ret = 0;
......@@ -870,14 +870,14 @@ static int __init init(void)
return ret;
}
static void __exit fini(void)
static void __exit khazad_mod_fini(void)
{
crypto_unregister_alg(&khazad_alg);
}
module_init(init);
module_exit(fini);
module_init(khazad_mod_init);
module_exit(khazad_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
......@@ -91,8 +91,9 @@ struct sinfo {
static inline void inc(be128 *iv)
{
if (!(iv->b = cpu_to_be64(be64_to_cpu(iv->b) + 1)))
iv->a = cpu_to_be64(be64_to_cpu(iv->a) + 1);
be64_add_cpu(&iv->b, 1);
if (!iv->b)
be64_add_cpu(&iv->a, 1);
}
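be64_add_cpu() is one of the generic byteorder helpers; it performs the convert/add/convert-back sequence that the removed lines open-coded, so the new inc() is behaviourally identical. Roughly, and only as an illustration of what the helper does (the real definition lives with the byteorder headers):

static inline void be64_add_cpu_sketch(__be64 *var, u64 val)
{
	*var = cpu_to_be64(be64_to_cpu(*var) + val);
}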
static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
......@@ -89,18 +89,18 @@ static struct crypto_alg alg = {
.coa_decompress = lzo_decompress } }
};
static int __init init(void)
static int __init lzo_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit lzo_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(lzo_mod_init);
module_exit(lzo_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO Compression Algorithm");
......@@ -233,18 +233,18 @@ static struct crypto_alg alg = {
.dia_final = md4_final } }
};
static int __init init(void)
static int __init md4_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit md4_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(md4_mod_init);
module_exit(md4_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
......@@ -228,18 +228,18 @@ static struct crypto_alg alg = {
.dia_final = md5_final } }
};
static int __init init(void)
static int __init md5_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit md5_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(md5_mod_init);
module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
......@@ -78,7 +78,7 @@ static int c_show(struct seq_file *m, void *p)
return 0;
}
static struct seq_operations crypto_seq_ops = {
static const struct seq_operations crypto_seq_ops = {
.start = c_start,
.next = c_next,
.stop = c_stop,
......@@ -99,11 +99,7 @@ static const struct file_operations proc_crypto_ops = {
void __init crypto_init_proc(void)
{
struct proc_dir_entry *proc;
proc = create_proc_entry("crypto", 0, NULL);
if (proc)
proc->proc_fops = &proc_crypto_ops;
proc_create("crypto", 0, NULL, &proc_crypto_ops);
}
void __exit crypto_exit_proc(void)
......@@ -237,18 +237,18 @@ static struct crypto_alg alg = {
}
};
static int __init init(void)
static int __init salsa20_generic_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit salsa20_generic_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(salsa20_generic_mod_init);
module_exit(salsa20_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
......@@ -557,7 +557,7 @@ static struct crypto_alg tnepres_alg = {
.cia_decrypt = tnepres_decrypt } }
};
static int __init init(void)
static int __init serpent_mod_init(void)
{
int ret = crypto_register_alg(&serpent_alg);
......@@ -572,14 +572,14 @@ static int __init init(void)
return ret;
}
static void __exit fini(void)
static void __exit serpent_mod_fini(void)
{
crypto_unregister_alg(&tnepres_alg);
crypto_unregister_alg(&serpent_alg);
}
module_init(init);
module_exit(fini);
module_init(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
......@@ -120,18 +120,18 @@ static struct crypto_alg alg = {
.dia_final = sha1_final } }
};
static int __init init(void)
static int __init sha1_generic_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit sha1_generic_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(sha1_generic_mod_init);
module_exit(sha1_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
......@@ -353,7 +353,7 @@ static struct crypto_alg sha224 = {
.dia_final = sha224_final } }
};
static int __init init(void)
static int __init sha256_generic_mod_init(void)
{
int ret = 0;
......@@ -370,14 +370,14 @@ static int __init init(void)
return ret;
}
static void __exit fini(void)
static void __exit sha256_generic_mod_fini(void)
{
crypto_unregister_alg(&sha224);
crypto_unregister_alg(&sha256);
}
module_init(init);
module_exit(fini);
module_init(sha256_generic_mod_init);
module_exit(sha256_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
......@@ -278,9 +278,7 @@ static struct crypto_alg sha384 = {
}
};
MODULE_ALIAS("sha384");
static int __init init(void)
static int __init sha512_generic_mod_init(void)
{
int ret = 0;
......@@ -292,14 +290,17 @@ out:
return ret;
}
static void __exit fini(void)
static void __exit sha512_generic_mod_fini(void)
{
crypto_unregister_alg(&sha384);
crypto_unregister_alg(&sha512);
}
module_init(init);
module_exit(fini);
module_init(sha512_generic_mod_init);
module_exit(sha512_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
MODULE_ALIAS("sha384");
MODULE_ALIAS("sha512");
......@@ -267,7 +267,7 @@ static struct crypto_alg xeta_alg = {
.cia_decrypt = xeta_decrypt } }
};
static int __init init(void)
static int __init tea_mod_init(void)
{
int ret = 0;
......@@ -292,7 +292,7 @@ out:
return ret;
}
static void __exit fini(void)
static void __exit tea_mod_fini(void)
{
crypto_unregister_alg(&tea_alg);
crypto_unregister_alg(&xtea_alg);
......@@ -302,8 +302,8 @@ static void __exit fini(void)
MODULE_ALIAS("xtea");
MODULE_ALIAS("xeta");
module_init(init);
module_exit(fini);
module_init(tea_mod_init);
module_exit(tea_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TEA, XTEA & XETA Cryptographic Algorithms");
......@@ -663,7 +663,7 @@ static struct crypto_alg tgr128 = {
.dia_final = tgr128_final}}
};
static int __init init(void)
static int __init tgr192_mod_init(void)
{
int ret = 0;
......@@ -688,7 +688,7 @@ static int __init init(void)
return ret;
}
static void __exit fini(void)
static void __exit tgr192_mod_fini(void)
{
crypto_unregister_alg(&tgr192);
crypto_unregister_alg(&tgr160);
......@@ -698,8 +698,8 @@ static void __exit fini(void)
MODULE_ALIAS("tgr160");
MODULE_ALIAS("tgr128");
module_init(init);
module_exit(fini);
module_init(tgr192_mod_init);
module_exit(tgr192_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Tiger Message Digest Algorithm");
......@@ -197,18 +197,18 @@ static struct crypto_alg alg = {
.cia_decrypt = twofish_decrypt } }
};
static int __init init(void)
static int __init twofish_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit twofish_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(twofish_mod_init);
module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
......@@ -1146,7 +1146,7 @@ static struct crypto_alg wp256 = {
.dia_final = wp256_final } }
};
static int __init init(void)
static int __init wp512_mod_init(void)
{
int ret = 0;
......@@ -1172,7 +1172,7 @@ out:
return ret;
}
static void __exit fini(void)
static void __exit wp512_mod_fini(void)
{
crypto_unregister_alg(&wp512);
crypto_unregister_alg(&wp384);
......@@ -1182,8 +1182,8 @@ static void __exit fini(void)
MODULE_ALIAS("wp384");
MODULE_ALIAS("wp256");
module_init(init);
module_exit(fini);
module_init(wp512_mod_init);
module_exit(wp512_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Whirlpool Message Digest Algorithm");
/*
* drivers/char/hw_random/omap-rng.c
*
* RNG driver for TI OMAP CPU family
* omap-rng.c - RNG driver for TI OMAP CPU family
*
* Author: Deepak Saxena <dsaxena@plexity.net>
*
......@@ -15,11 +13,6 @@
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*
* TODO:
*
* - Make status updated be interrupt driven so we don't poll
*
*/
#include <linux/module.h>
......@@ -55,17 +48,16 @@ static void __iomem *rng_base;
static struct clk *rng_ick;
static struct platform_device *rng_dev;
static u32 omap_rng_read_reg(int reg)
static inline u32 omap_rng_read_reg(int reg)
{
return __raw_readl(rng_base + reg);
}
static void omap_rng_write_reg(int reg, u32 val)
static inline void omap_rng_write_reg(int reg, u32 val)
{
__raw_writel(val, rng_base + reg);
}
/* REVISIT: Does the status bit really work on 16xx? */
static int omap_rng_data_present(struct hwrng *rng, int wait)
{
int data, i;
......@@ -74,6 +66,11 @@ static int omap_rng_data_present(struct hwrng *rng, int wait)
data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
if (data || !wait)
break;
/* The RNG produces data fast enough (2+ Mbit/sec, even
 * during "rngtest" loads) that these delays don't
 * seem to trigger.  We *could* use the RNG IRQ, but
 * that'd be higher overhead ... so why bother?
 */
udelay(10);
}
return data;
......@@ -101,7 +98,8 @@ static int __init omap_rng_probe(struct platform_device *pdev)
* A bit ugly, and it will never actually happen but there can
* be only one RNG and this catches any bork
*/
BUG_ON(rng_dev);
if (rng_dev)
return -EBUSY;
if (cpu_is_omap24xx()) {
rng_ick = clk_get(NULL, "rng_ick");
......@@ -124,7 +122,7 @@ static int __init omap_rng_probe(struct platform_device *pdev)
return -EBUSY;
dev_set_drvdata(&pdev->dev, mem);
rng_base = (u32 __iomem *)io_p2v(res->start);
rng_base = (u32 __force __iomem *)io_p2v(res->start);
ret = hwrng_register(&omap_rng_ops);
if (ret) {
......@@ -182,6 +180,8 @@ static int omap_rng_resume(struct platform_device *pdev)
#endif
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap_rng");
static struct platform_driver omap_rng_driver = {
.driver = {
......@@ -27,6 +27,7 @@ config CRYPTO_DEV_PADLOCK_AES
tristate "PadLock driver for AES algorithm"
depends on CRYPTO_DEV_PADLOCK
select CRYPTO_BLKCIPHER
select CRYPTO_AES
help
Use VIA PadLock for AES algorithm.
......@@ -101,6 +102,19 @@ config CRYPTO_SHA256_S390
This version of SHA implements a 256 bit hash with 128 bits of
security against collision attacks.
config CRYPTO_SHA512_S390
tristate "SHA384 and SHA512 digest algorithm"
depends on S390
select CRYPTO_ALGAPI
help
This is the s390 hardware accelerated implementation of the
SHA512 secure hash standard.
This version of SHA implements a 512 bit hash with 256 bits of
security against collision attacks. The code also includes SHA-384,
a 384 bit hash with 192 bits of security against collision attacks.
config CRYPTO_DES_S390
tristate "DES and Triple DES cipher algorithms"
depends on S390
......@@ -14,11 +14,13 @@
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
#define AES_BLOCK_SIZE 16
#define AES_MAX_KEYLENGTH (15 * 16)
#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32))
struct crypto_aes_ctx {
u32 key_length;
u32 key_enc[60];
u32 key_dec[60];
u32 key_enc[AES_MAX_KEYLENGTH_U32];
u32 key_dec[AES_MAX_KEYLENGTH_U32];
};
extern u32 crypto_ft_tab[4][256];
......@@ -28,4 +30,6 @@ extern u32 crypto_il_tab[4][256];
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len);
int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
#endif
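For reference, the new macros just give names to the sizes that were previously hard-coded: AES-256 needs 14 rounds, so the schedule holds 15 round keys of 16 bytes, i.e. 240 bytes or 60 u32 words, matching the old key_enc[60]/key_dec[60] arrays. A purely illustrative compile-time check (the helper name is made up):

#include <linux/kernel.h>
#include <crypto/aes.h>

static inline void aes_key_length_check(void)
{
	BUILD_BUG_ON(AES_MAX_KEYLENGTH != (14 + 1) * AES_BLOCK_SIZE);	/* 240 bytes */
	BUILD_BUG_ON(AES_MAX_KEYLENGTH_U32 != 240 / 4);			/* 60 words, as before */
}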
......@@ -317,14 +317,7 @@ int crypto_unregister_alg(struct crypto_alg *alg);
/*
* Algorithm query interface.
*/
#ifdef CONFIG_CRYPTO
int crypto_has_alg(const char *name, u32 type, u32 mask);
#else
static inline int crypto_has_alg(const char *name, u32 type, u32 mask)
{
return 0;
}
#endif
/*
* Transforms: user-instantiated objects which encapsulate algorithms