Commit 38320c70 authored by Herbert Xu, committed by David S. Miller

[IPSEC]: Use crypto_aead and authenc in ESP

This patch converts ESP to use the crypto_aead interface and in particular
the authenc algorithm.  This lays the foundations for future support of
combined mode algorithms.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bf164cc0
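For readers unfamiliar with the authenc interface this patch switches to, below is a minimal sketch (not part of the commit; the algorithm string and key lengths are illustrative) of how an "authenc(...)" AEAD transform is allocated and keyed. It mirrors what the new esp_init_state()/esp6_init_state() in the diff build from the xfrm_state algorithm data: an rtattr header carrying the cipher key length, then the authentication key, then the encryption key.

/*
 * Illustrative only -- not part of this commit.
 */
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct crypto_aead *example_authenc_tfm(const u8 *auth_key,
					       unsigned int auth_keylen,
					       const u8 *enc_key,
					       unsigned int enc_keylen)
{
	struct crypto_authenc_key_param *param;
	struct crypto_aead *aead;
	struct rtattr *rta;
	unsigned int keylen;
	u8 *key, *p;
	int err;

	/* example algorithm name; ESP builds "authenc(<aalg>,<ealg>)" */
	aead = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(aead))
		return aead;

	keylen = RTA_SPACE(sizeof(*param)) + auth_keylen + enc_keylen;
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto free_aead;

	/* rtattr header tells the authenc template the cipher key length */
	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enc_keylen);
	p += RTA_SPACE(sizeof(*param));

	/* authentication key first, encryption key second */
	memcpy(p, auth_key, auth_keylen);
	p += auth_keylen;
	memcpy(p, enc_key, enc_keylen);

	err = crypto_aead_setkey(aead, key, keylen);
	kfree(key);
	if (err)
		goto free_aead;

	return aead;

free_aead:
	crypto_free_aead(aead);
	return ERR_PTR(err);
}

In the patch itself the same blob is assembled from x->aalg and x->ealg, and the truncated ICV length requested by the SA is then applied with crypto_aead_setauthsize().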
 #ifndef _NET_ESP_H
 #define _NET_ESP_H

-#include <linux/crypto.h>
-#include <net/xfrm.h>
-#include <linux/scatterlist.h>
+#include <linux/skbuff.h>

-#define ESP_NUM_FAST_SG		4
+struct crypto_aead;

-struct esp_data
-{
-	struct scatterlist		sgbuf[ESP_NUM_FAST_SG];
-
-	/* Confidentiality */
-	struct {
-		int			padlen;		/* 0..255 */
-		/* ivlen is offset from enc_data, where encrypted data start.
-		 * It is logically different of crypto_tfm_alg_ivsize(tfm).
-		 * We assume that it is either zero (no ivec), or
-		 * >= crypto_tfm_alg_ivsize(tfm). */
-		int			ivlen;
-		int			ivinitted;
-		u8			*ivec;		/* ivec buffer */
-		struct crypto_blkcipher	*tfm;		/* crypto handle */
-	} conf;
-
-	/* Integrity. It is active when icv_full_len != 0 */
-	struct {
-		u8			*work_icv;
-		int			icv_full_len;
-		int			icv_trunc_len;
-		struct crypto_hash	*tfm;
-	} auth;
+struct esp_data {
+	/* 0..255 */
+	int padlen;
+
+	/* Confidentiality & Integrity */
+	struct crypto_aead *aead;
 };

 extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);

-static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb,
-				 int offset, int len)
-{
-	struct hash_desc desc;
-	int err;
-
-	desc.tfm = esp->auth.tfm;
-	desc.flags = 0;
-
-	err = crypto_hash_init(&desc);
-	if (unlikely(err))
-		return err;
-	err = skb_icv_walk(skb, &desc, offset, len, crypto_hash_update);
-	if (unlikely(err))
-		return err;
-	return crypto_hash_final(&desc, esp->auth.work_icv);
-}
-
 struct ip_esp_hdr;

 static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
...
@@ -343,6 +343,7 @@ config INET_ESP
 	tristate "IP: ESP transformation"
 	select XFRM
 	select CRYPTO
+	select CRYPTO_AEAD
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
 	select CRYPTO_CBC
...
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
-#include <linux/random.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/in6.h>
 #include <net/icmp.h>
 #include <net/protocol.h>
 #include <net/udp.h>
struct esp_skb_cb {
struct xfrm_skb_cb xfrm;
void *tmp;
};
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
/*
* Allocate an AEAD request structure with extra space for SG and IV.
*
* For alignment considerations the IV is placed at the front, followed
* by the request and finally the SG list.
*
* TODO: Use spare space in skb for this where possible.
*/
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
{
unsigned int len;
len = crypto_aead_ivsize(aead);
if (len) {
len += crypto_aead_alignmask(aead) &
~(crypto_tfm_ctx_alignment() - 1);
len = ALIGN(len, crypto_tfm_ctx_alignment());
}
len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
len = ALIGN(len, __alignof__(struct scatterlist));
len += sizeof(struct scatterlist) * nfrags;
return kmalloc(len, GFP_ATOMIC);
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
{
return crypto_aead_ivsize(aead) ?
PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
struct crypto_aead *aead, u8 *iv)
{
struct aead_givcrypt_request *req;
req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
crypto_tfm_ctx_alignment());
aead_givcrypt_set_tfm(req, aead);
return req;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
struct aead_request *req;
req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
crypto_tfm_ctx_alignment());
aead_request_set_tfm(req, aead);
return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
struct aead_request *req)
{
return (void *)ALIGN((unsigned long)(req + 1) +
crypto_aead_reqsize(aead),
__alignof__(struct scatterlist));
}
static inline struct scatterlist *esp_givreq_sg(
struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
return (void *)ALIGN((unsigned long)(req + 1) +
crypto_aead_reqsize(aead),
__alignof__(struct scatterlist));
}
static void esp_output_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
kfree(ESP_SKB_CB(skb)->tmp);
xfrm_output_resume(skb, err);
}
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
 	struct ip_esp_hdr *esph;
-	struct crypto_blkcipher *tfm;
-	struct blkcipher_desc desc;
+	struct crypto_aead *aead;
+	struct aead_givcrypt_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 	struct esp_data *esp;
 	struct sk_buff *trailer;
+	void *tmp;
+	u8 *iv;
 	u8 *tail;
 	int blksize;
 	int clen;
@@ -36,18 +127,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	clen = skb->len;

 	esp = x->data;
-	alen = esp->auth.icv_trunc_len;
-	tfm = esp->conf.tfm;
-	desc.tfm = tfm;
-	desc.flags = 0;
-	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
+	aead = esp->aead;
+	alen = crypto_aead_authsize(aead);
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(clen + 2, blksize);
-	if (esp->conf.padlen)
-		clen = ALIGN(clen, esp->conf.padlen);
+	if (esp->padlen)
+		clen = ALIGN(clen, esp->padlen);

-	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
+	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
 		goto error;
+	nfrags = err;
+
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto error;
+
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_givreq(aead, iv);
+	asg = esp_givreq_sg(aead, req);
+	sg = asg + 1;

 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
 	do {
@@ -56,28 +156,34 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 		tail[i] = i + 1;
 	} while (0);
 	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
-	pskb_put(skb, trailer, clen - skb->len);
+	tail[clen - skb->len - 1] = *skb_mac_header(skb);
+	pskb_put(skb, trailer, clen - skb->len + alen);

 	skb_push(skb, -skb_network_offset(skb));
 	esph = ip_esp_hdr(skb);
-	*(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
 	*skb_mac_header(skb) = IPPROTO_ESP;

-	spin_lock_bh(&x->lock);
-
 	/* this is non-NULL only with UDP Encapsulation */
 	if (x->encap) {
 		struct xfrm_encap_tmpl *encap = x->encap;
 		struct udphdr *uh;
 		__be32 *udpdata32;
+		unsigned int sport, dport;
+		int encap_type;
+
+		spin_lock_bh(&x->lock);
+		sport = encap->encap_sport;
+		dport = encap->encap_dport;
+		encap_type = encap->encap_type;
+		spin_unlock_bh(&x->lock);

 		uh = (struct udphdr *)esph;
-		uh->source = encap->encap_sport;
-		uh->dest = encap->encap_dport;
-		uh->len = htons(skb->len + alen - skb_transport_offset(skb));
+		uh->source = sport;
+		uh->dest = dport;
+		uh->len = htons(skb->len - skb_transport_offset(skb));
 		uh->check = 0;

-		switch (encap->encap_type) {
+		switch (encap_type) {
 		default:
 		case UDP_ENCAP_ESPINUDP:
 			esph = (struct ip_esp_hdr *)(uh + 1);
@@ -95,131 +201,45 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	esph->spi = x->id.spi;
 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);

-	if (esp->conf.ivlen) {
-		if (unlikely(!esp->conf.ivinitted)) {
-			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
-			esp->conf.ivinitted = 1;
-		}
-		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
-
-	do {
-		struct scatterlist *sg = &esp->sgbuf[0];
-
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg)
-				goto unlock;
-		}
-		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg,
-			     esph->enc_data +
-			     esp->conf.ivlen -
-			     skb->data, clen);
-		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	} while (0);
-
-	if (unlikely(err))
-		goto unlock;
-
-	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
-		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
-
-	if (esp->auth.icv_full_len) {
-		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
-				     sizeof(*esph) + esp->conf.ivlen + clen);
-		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
-	}
-
-unlock:
-	spin_unlock_bh(&x->lock);
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg,
+		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+		     clen + alen);
+	sg_init_one(asg, esph, sizeof(*esph));
+
+	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
+	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
+	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
+
+	ESP_SKB_CB(skb)->tmp = tmp;
+	err = crypto_aead_givencrypt(req);
+	if (err == -EINPROGRESS)
+		goto error;
+
+	if (err == -EBUSY)
+		err = NET_XMIT_DROP;
+
+	kfree(tmp);

 error:
 	return err;
 }
-/*
- * Note: detecting truncated vs. non-truncated authentication data is very
- * expensive, so we only support truncated data, which is the recommended
- * and common case.
- */
-static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+static int esp_input_done2(struct sk_buff *skb, int err)
 {
 	struct iphdr *iph;
-	struct ip_esp_hdr *esph;
+	struct xfrm_state *x = xfrm_input_state(skb);
 	struct esp_data *esp = x->data;
-	struct crypto_blkcipher *tfm = esp->conf.tfm;
-	struct blkcipher_desc desc = { .tfm = tfm };
-	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
-	int alen = esp->auth.icv_trunc_len;
-	int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
-	int nfrags;
+	struct crypto_aead *aead = esp->aead;
+	int alen = crypto_aead_authsize(aead);
+	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
+	int elen = skb->len - hlen;
 	int ihl;
 	u8 nexthdr[2];
-	struct scatterlist *sg;
 	int padlen;
-	int err = -EINVAL;

-	if (!pskb_may_pull(skb, sizeof(*esph)))
-		goto out;
-
-	if (elen <= 0 || (elen & (blksize-1)))
-		goto out;
-
-	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
-		goto out;
-	nfrags = err;
-
-	skb->ip_summed = CHECKSUM_NONE;
-
-	spin_lock(&x->lock);
-
-	/* If integrity check is required, do this. */
-	if (esp->auth.icv_full_len) {
-		u8 sum[alen];
-
-		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
-		if (err)
-			goto unlock;
-
-		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
-			BUG();
-
-		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
-			err = -EBADMSG;
-			goto unlock;
-		}
-	}
-
-	esph = (struct ip_esp_hdr *)skb->data;
-
-	/* Get ivec. This can be wrong, check against another impls. */
-	if (esp->conf.ivlen)
-		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
-
-	sg = &esp->sgbuf[0];
-
-	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-		err = -ENOMEM;
-		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-		if (!sg)
-			goto unlock;
-	}
-	sg_init_table(sg, nfrags);
-	skb_to_sgvec(skb, sg,
-		     sizeof(*esph) + esp->conf.ivlen,
-		     elen);
-	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
-	if (unlikely(sg != &esp->sgbuf[0]))
-		kfree(sg);
-
-unlock:
-	spin_unlock(&x->lock);
+	kfree(ESP_SKB_CB(skb)->tmp);

 	if (unlikely(err))
 		goto out;
@@ -229,15 +249,11 @@ unlock:
 	err = -EINVAL;
 	padlen = nexthdr[0];
-	if (padlen+2 >= elen)
+	if (padlen + 2 + alen >= elen)
 		goto out;

 	/* ... check padding bits here. Silly. :-) */

-	/* RFC4303: Drop dummy packets without any error */
-	if (nexthdr[1] == IPPROTO_NONE)
-		goto out;
-
 	iph = ip_hdr(skb);
 	ihl = iph->ihl * 4;
@@ -279,10 +295,87 @@ unlock:
 	}

 	pskb_trim(skb, skb->len - alen - padlen - 2);
-	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
+	__skb_pull(skb, hlen);
 	skb_set_transport_header(skb, -ihl);
-	return nexthdr[1];
+
+	err = nexthdr[1];
+
+	/* RFC4303: Drop dummy packets without any error */
+	if (err == IPPROTO_NONE)
+		err = -EINVAL;
+
+out:
+	return err;
+}
static void esp_input_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
xfrm_input_resume(skb, esp_input_done2(skb, err));
}
/*
* Note: detecting truncated vs. non-truncated authentication data is very
* expensive, so we only support truncated data, which is the recommended
* and common case.
*/
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
struct ip_esp_hdr *esph;
struct esp_data *esp = x->data;
struct crypto_aead *aead = esp->aead;
struct aead_request *req;
struct sk_buff *trailer;
int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
int nfrags;
void *tmp;
u8 *iv;
struct scatterlist *sg;
struct scatterlist *asg;
int err = -EINVAL;
if (!pskb_may_pull(skb, sizeof(*esph)))
goto out;
if (elen <= 0)
goto out;
if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
goto out;
nfrags = err;
err = -ENOMEM;
tmp = esp_alloc_tmp(aead, nfrags + 1);
if (!tmp)
goto out;
ESP_SKB_CB(skb)->tmp = tmp;
iv = esp_tmp_iv(aead, tmp);
req = esp_tmp_req(aead, iv);
asg = esp_req_sg(aead, req);
sg = asg + 1;
skb->ip_summed = CHECKSUM_NONE;
esph = (struct ip_esp_hdr *)skb->data;
/* Get ivec. This can be wrong, check against another impls. */
iv = esph->enc_data;
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
sg_init_one(asg, esph, sizeof(*esph));
aead_request_set_callback(req, 0, esp_input_done, skb);
aead_request_set_crypt(req, sg, sg, elen, iv);
aead_request_set_assoc(req, asg, sizeof(*esph));
err = crypto_aead_decrypt(req);
if (err == -EINPROGRESS)
goto out;
err = esp_input_done2(skb, err);
out:
	return err;
@@ -291,11 +384,11 @@ out:
 static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	u32 align = max_t(u32, blksize, esp->conf.padlen);
+	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+	u32 align = max_t(u32, blksize, esp->padlen);
 	u32 rem;

-	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
+	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
 	rem = mtu & (align - 1);
 	mtu &= ~(align - 1);
@@ -342,80 +435,98 @@ static void esp_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;

-	crypto_free_blkcipher(esp->conf.tfm);
-	esp->conf.tfm = NULL;
-	kfree(esp->conf.ivec);
-	esp->conf.ivec = NULL;
-	crypto_free_hash(esp->auth.tfm);
-	esp->auth.tfm = NULL;
-	kfree(esp->auth.work_icv);
-	esp->auth.work_icv = NULL;
+	crypto_free_aead(esp->aead);
 	kfree(esp);
 }
 static int esp_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
-	struct crypto_blkcipher *tfm;
+	struct crypto_aead *aead;
+	struct crypto_authenc_key_param *param;
+	struct rtattr *rta;
+	char *key;
+	char *p;
+	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	u32 align;
+	unsigned int keylen;
+	int err;

 	if (x->ealg == NULL)
-		goto error;
+		return -EINVAL;
+
+	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
+		     x->aalg ? x->aalg->alg_name : "digest_null",
+		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;

 	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;

+	x->data = esp;
+
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
+	err = PTR_ERR(aead);
+	if (IS_ERR(aead))
+		goto error;
+
+	esp->aead = aead;
+
+	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
+		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
+	err = -ENOMEM;
+	key = kmalloc(keylen, GFP_KERNEL);
+	if (!key)
+		goto error;
+
+	p = key;
+	rta = (void *)p;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	p += RTA_SPACE(sizeof(*param));
+
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
-		struct crypto_hash *hash;
-
-		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
-					 CRYPTO_ALG_ASYNC);
-		if (IS_ERR(hash))
-			goto error;

-		esp->auth.tfm = hash;
-		if (crypto_hash_setkey(hash, x->aalg->alg_key,
-				       (x->aalg->alg_key_len + 7) / 8))
-			goto error;
+		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
+		p += (x->aalg->alg_key_len + 7) / 8;

 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);

+		err = -EINVAL;
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_hash_digestsize(hash)) {
+		    crypto_aead_authsize(aead)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_hash_digestsize(hash),
+				 crypto_aead_authsize(aead),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
-			goto error;
+			goto free_key;
 		}

-		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
-		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
-
-		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
-		if (!esp->auth.work_icv)
-			goto error;
+		err = crypto_aead_setauthsize(
+			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+		if (err)
+			goto free_key;
 	}

-	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		goto error;
-	esp->conf.tfm = tfm;
-	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
-	esp->conf.padlen = 0;
-	if (esp->conf.ivlen) {
-		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
-		if (unlikely(esp->conf.ivec == NULL))
-			goto error;
-		esp->conf.ivinitted = 0;
-	}
-	if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key,
-				    (x->ealg->alg_key_len + 7) / 8))
+	esp->padlen = 0;
+
+	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
+
+	err = crypto_aead_setkey(aead, key, keylen);
+
+free_key:
+	kfree(key);
+
+	if (err)
 		goto error;
-	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
+
+	x->props.header_len = sizeof(struct ip_esp_hdr) +
+			      crypto_aead_ivsize(aead);
 	if (x->props.mode == XFRM_MODE_TUNNEL)
 		x->props.header_len += sizeof(struct iphdr);
 	else if (x->props.mode == XFRM_MODE_BEET)
@@ -434,18 +545,14 @@ static int esp_init_state(struct xfrm_state *x)
 			break;
 		}
 	}
-	x->data = esp;
-	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	if (esp->conf.padlen)
-		align = max_t(u32, align, esp->conf.padlen);
-	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
-	return 0;
+
+	align = ALIGN(crypto_aead_blocksize(aead), 4);
+	if (esp->padlen)
+		align = max_t(u32, align, esp->padlen);
+	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

 error:
-	x->data = esp;
-	esp_destroy(x);
-	x->data = NULL;
-	return -EINVAL;
+	return err;
 }
 static struct xfrm_type esp_type =
...
@@ -85,6 +85,7 @@ config INET6_ESP
 	depends on IPV6
 	select XFRM
 	select CRYPTO
+	select CRYPTO_AEAD
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
 	select CRYPTO_CBC
...
@@ -24,33 +24,124 @@
  * This file is derived from net/ipv4/esp.c
  */

+#include <crypto/aead.h>
+#include <crypto/authenc.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <net/icmp.h>
 #include <net/ipv6.h>
 #include <net/protocol.h>
 #include <linux/icmpv6.h>
struct esp_skb_cb {
struct xfrm_skb_cb xfrm;
void *tmp;
};
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
/*
* Allocate an AEAD request structure with extra space for SG and IV.
*
* For alignment considerations the IV is placed at the front, followed
* by the request and finally the SG list.
*
* TODO: Use spare space in skb for this where possible.
*/
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
{
unsigned int len;
len = crypto_aead_ivsize(aead);
if (len) {
len += crypto_aead_alignmask(aead) &
~(crypto_tfm_ctx_alignment() - 1);
len = ALIGN(len, crypto_tfm_ctx_alignment());
}
len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
len = ALIGN(len, __alignof__(struct scatterlist));
len += sizeof(struct scatterlist) * nfrags;
return kmalloc(len, GFP_ATOMIC);
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
{
return crypto_aead_ivsize(aead) ?
PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
struct crypto_aead *aead, u8 *iv)
{
struct aead_givcrypt_request *req;
req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
crypto_tfm_ctx_alignment());
aead_givcrypt_set_tfm(req, aead);
return req;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
struct aead_request *req;
req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
crypto_tfm_ctx_alignment());
aead_request_set_tfm(req, aead);
return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
struct aead_request *req)
{
return (void *)ALIGN((unsigned long)(req + 1) +
crypto_aead_reqsize(aead),
__alignof__(struct scatterlist));
}
static inline struct scatterlist *esp_givreq_sg(
struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
return (void *)ALIGN((unsigned long)(req + 1) +
crypto_aead_reqsize(aead),
__alignof__(struct scatterlist));
}
static void esp_output_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
kfree(ESP_SKB_CB(skb)->tmp);
xfrm_output_resume(skb, err);
}
 static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
 	struct ip_esp_hdr *esph;
-	struct crypto_blkcipher *tfm;
-	struct blkcipher_desc desc;
+	struct crypto_aead *aead;
+	struct aead_givcrypt_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 	struct sk_buff *trailer;
+	void *tmp;
 	int blksize;
 	int clen;
 	int alen;
 	int nfrags;
+	u8 *iv;
 	u8 *tail;
 	struct esp_data *esp = x->data;
@@ -60,18 +151,26 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	/* Round to block size */
 	clen = skb->len;

-	alen = esp->auth.icv_trunc_len;
-	tfm = esp->conf.tfm;
-	desc.tfm = tfm;
-	desc.flags = 0;
-	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
+	aead = esp->aead;
+	alen = crypto_aead_authsize(aead);
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(clen + 2, blksize);
-	if (esp->conf.padlen)
-		clen = ALIGN(clen, esp->conf.padlen);
+	if (esp->padlen)
+		clen = ALIGN(clen, esp->padlen);

-	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
+	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
 		goto error;
-	}
+	nfrags = err;
+
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto error;
+
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_givreq(aead, iv);
+	asg = esp_givreq_sg(aead, req);
+	sg = asg + 1;

 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
@@ -81,86 +180,113 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 		tail[i] = i + 1;
 	} while (0);
 	tail[clen-skb->len - 2] = (clen - skb->len) - 2;
-	pskb_put(skb, trailer, clen - skb->len);
+	tail[clen - skb->len - 1] = *skb_mac_header(skb);
+	pskb_put(skb, trailer, clen - skb->len + alen);

 	skb_push(skb, -skb_network_offset(skb));
 	esph = ip_esp_hdr(skb);
-	*(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
 	*skb_mac_header(skb) = IPPROTO_ESP;

 	esph->spi = x->id.spi;
 	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);

-	spin_lock_bh(&x->lock);
-
-	if (esp->conf.ivlen) {
-		if (unlikely(!esp->conf.ivinitted)) {
-			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
-			esp->conf.ivinitted = 1;
-		}
-		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
-
-	do {
-		struct scatterlist *sg = &esp->sgbuf[0];
-
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg)
-				goto unlock;
-		}
-		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg,
-			     esph->enc_data +
-			     esp->conf.ivlen -
-			     skb->data, clen);
-		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	} while (0);
-
-	if (unlikely(err))
-		goto unlock;
-
-	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
-		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
-	}
-
-	if (esp->auth.icv_full_len) {
-		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
-				     sizeof(*esph) + esp->conf.ivlen + clen);
-		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
-	}
-
-unlock:
-	spin_unlock_bh(&x->lock);
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg,
+		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+		     clen + alen);
+	sg_init_one(asg, esph, sizeof(*esph));
+
+	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
+	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
+	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq);
+
+	ESP_SKB_CB(skb)->tmp = tmp;
+	err = crypto_aead_givencrypt(req);
+	if (err == -EINPROGRESS)
+		goto error;
+
+	if (err == -EBUSY)
+		err = NET_XMIT_DROP;
+
+	kfree(tmp);

 error:
 	return err;
 }
+
+static int esp_input_done2(struct sk_buff *skb, int err)
+{
+	struct xfrm_state *x = xfrm_input_state(skb);
+	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = esp->aead;
+	int alen = crypto_aead_authsize(aead);
+	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
+	int elen = skb->len - hlen;
+	int hdr_len = skb_network_header_len(skb);
+	int padlen;
+	u8 nexthdr[2];
+
+	kfree(ESP_SKB_CB(skb)->tmp);
+
+	if (unlikely(err))
+		goto out;
+
+	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
+		BUG();
+
+	err = -EINVAL;
+	padlen = nexthdr[0];
+	if (padlen + 2 + alen >= elen) {
+		LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
+			       "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
+		goto out;
+	}
+
+	/* ... check padding bits here. Silly. :-) */
+
+	pskb_trim(skb, skb->len - alen - padlen - 2);
+	__skb_pull(skb, hlen);
+	skb_set_transport_header(skb, -hdr_len);
+
+	err = nexthdr[1];
+
+	/* RFC4303: Drop dummy packets without any error */
+	if (err == IPPROTO_NONE)
+		err = -EINVAL;
+
+out:
+	return err;
+}
static void esp_input_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
xfrm_input_resume(skb, esp_input_done2(skb, err));
}
 static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-	struct ipv6hdr *iph;
 	struct ip_esp_hdr *esph;
 	struct esp_data *esp = x->data;
-	struct crypto_blkcipher *tfm = esp->conf.tfm;
-	struct blkcipher_desc desc = { .tfm = tfm };
+	struct crypto_aead *aead = esp->aead;
+	struct aead_request *req;
 	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
-	int alen = esp->auth.icv_trunc_len;
-	int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
-	int hdr_len = skb_network_header_len(skb);
+	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
 	int nfrags;
 	int ret = 0;
+	void *tmp;
+	u8 *iv;
+	struct scatterlist *sg;
+	struct scatterlist *asg;

 	if (!pskb_may_pull(skb, sizeof(*esph))) {
 		ret = -EINVAL;
 		goto out;
 	}

-	if (elen <= 0 || (elen & (blksize-1))) {
+	if (elen <= 0) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -170,86 +296,38 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 		goto out;
 	}

-	skb->ip_summed = CHECKSUM_NONE;
-
-	spin_lock(&x->lock);
-
-	/* If integrity check is required, do this. */
-	if (esp->auth.icv_full_len) {
-		u8 sum[alen];
-
-		ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
-		if (ret)
-			goto unlock;
-
-		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
-			BUG();
-
-		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
-			ret = -EBADMSG;
-			goto unlock;
-		}
-	}
-
-	esph = (struct ip_esp_hdr *)skb->data;
-	iph = ipv6_hdr(skb);
-
-	/* Get ivec. This can be wrong, check against another impls. */
-	if (esp->conf.ivlen)
-		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
-
-	{
-		struct scatterlist *sg = &esp->sgbuf[0];
-
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg) {
-				ret = -ENOMEM;
-				goto unlock;
-			}
-		}
-		sg_init_table(sg, nfrags);
-		skb_to_sgvec(skb, sg,
-			     sizeof(*esph) + esp->conf.ivlen,
-			     elen);
-		ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	}
-
-unlock:
-	spin_unlock(&x->lock);
-
-	if (unlikely(ret))
-		goto out;
-
-	{
-		u8 nexthdr[2];
-		u8 padlen;
-
-		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
-			BUG();
-
-		padlen = nexthdr[0];
-		if (padlen+2 >= elen) {
-			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
-			ret = -EINVAL;
-			goto out;
-		}
-		/* ... check padding bits here. Silly. :-) */
-
-		/* RFC4303: Drop dummy packets without any error */
-		if (nexthdr[1] == IPPROTO_NONE) {
-			ret = -EINVAL;
-			goto out;
-		}
-
-		pskb_trim(skb, skb->len - alen - padlen - 2);
-		ret = nexthdr[1];
-	}
-
-	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
-	skb_set_transport_header(skb, -hdr_len);
+	ret = -ENOMEM;
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto out;
+
+	ESP_SKB_CB(skb)->tmp = tmp;
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_req(aead, iv);
+	asg = esp_req_sg(aead, req);
+	sg = asg + 1;
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	esph = (struct ip_esp_hdr *)skb->data;
+
+	/* Get ivec. This can be wrong, check against another impls. */
+	iv = esph->enc_data;
+
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+	sg_init_one(asg, esph, sizeof(*esph));
+
+	aead_request_set_callback(req, 0, esp_input_done, skb);
+	aead_request_set_crypt(req, sg, sg, elen, iv);
+	aead_request_set_assoc(req, asg, sizeof(*esph));
+
+	ret = crypto_aead_decrypt(req);
+	if (ret == -EINPROGRESS)
+		goto out;
+
+	ret = esp_input_done2(skb, ret);
+
 out:
 	return ret;
 }
@@ -257,11 +335,11 @@ out:
 static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
-	u32 align = max_t(u32, blksize, esp->conf.padlen);
+	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+	u32 align = max_t(u32, blksize, esp->padlen);
 	u32 rem;

-	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
+	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
 	rem = mtu & (align - 1);
 	mtu &= ~(align - 1);
@@ -300,81 +378,101 @@ static void esp6_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;

-	crypto_free_blkcipher(esp->conf.tfm);
-	esp->conf.tfm = NULL;
-	kfree(esp->conf.ivec);
-	esp->conf.ivec = NULL;
-	crypto_free_hash(esp->auth.tfm);
-	esp->auth.tfm = NULL;
-	kfree(esp->auth.work_icv);
-	esp->auth.work_icv = NULL;
+	crypto_free_aead(esp->aead);
 	kfree(esp);
 }
 static int esp6_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
-	struct crypto_blkcipher *tfm;
+	struct crypto_aead *aead;
+	struct crypto_authenc_key_param *param;
+	struct rtattr *rta;
+	char *key;
+	char *p;
+	char authenc_name[CRYPTO_MAX_ALG_NAME];
+	u32 align;
+	unsigned int keylen;
+	int err;

 	if (x->ealg == NULL)
-		goto error;
+		return -EINVAL;

 	if (x->encap)
-		goto error;
+		return -EINVAL;
+
+	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
+		     x->aalg ? x->aalg->alg_name : "digest_null",
+		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;

 	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;

+	x->data = esp;
+
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
+	err = PTR_ERR(aead);
+	if (IS_ERR(aead))
+		goto error;
+
+	esp->aead = aead;
+
+	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
+		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
+	err = -ENOMEM;
+	key = kmalloc(keylen, GFP_KERNEL);
+	if (!key)
+		goto error;
+
+	p = key;
+	rta = (void *)p;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	p += RTA_SPACE(sizeof(*param));
+
 	if (x->aalg) {
 		struct xfrm_algo_desc *aalg_desc;
-		struct crypto_hash *hash;
-
-		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
-					 CRYPTO_ALG_ASYNC);
-		if (IS_ERR(hash))
-			goto error;

-		esp->auth.tfm = hash;
-		if (crypto_hash_setkey(hash, x->aalg->alg_key,
-				       (x->aalg->alg_key_len + 7) / 8))
-			goto error;
+		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
+		p += (x->aalg->alg_key_len + 7) / 8;

 		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
 		BUG_ON(!aalg_desc);

+		err = -EINVAL;
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_hash_digestsize(hash)) {
+		    crypto_aead_authsize(aead)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_hash_digestsize(hash),
+				 crypto_aead_authsize(aead),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
-			goto error;
+			goto free_key;
 		}

-		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
-		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
-
-		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
-		if (!esp->auth.work_icv)
-			goto error;
+		err = crypto_aead_setauthsize(
+			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+		if (err)
+			goto free_key;
 	}

-	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		goto error;
-	esp->conf.tfm = tfm;
-	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
-	esp->conf.padlen = 0;
-	if (esp->conf.ivlen) {
-		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
-		if (unlikely(esp->conf.ivec == NULL))
-			goto error;
-		esp->conf.ivinitted = 0;
-	}
-	if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key,
-				    (x->ealg->alg_key_len + 7) / 8))
+	esp->padlen = 0;
+
+	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
+
+	err = crypto_aead_setkey(aead, key, keylen);
+
+free_key:
+	kfree(key);
+
+	if (err)
 		goto error;
-	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
+
+	x->props.header_len = sizeof(struct ip_esp_hdr) +
+			      crypto_aead_ivsize(aead);
 	switch (x->props.mode) {
 	case XFRM_MODE_BEET:
 	case XFRM_MODE_TRANSPORT:
@@ -385,14 +483,14 @@ static int esp6_init_state(struct xfrm_state *x)
 	default:
 		goto error;
 	}
-	x->data = esp;
-	return 0;
+
+	align = ALIGN(crypto_aead_blocksize(aead), 4);
+	if (esp->padlen)
+		align = max_t(u32, align, esp->padlen);
+	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

 error:
-	x->data = esp;
-	esp6_destroy(x);
-	x->data = NULL;
-	return -EINVAL;
+	return err;
 }
 static struct xfrm_type esp6_type =
...