Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.3:

  API:

   - the AEAD interface transition is now complete.
   - add top-level skcipher interface.

  Drivers:

   - x86-64 acceleration for chacha20/poly1305.
   - add sunxi-ss Allwinner Security System crypto accelerator.
   - add RSA algorithm to qat driver.
   - add SRIOV support to qat driver.
   - add LS1021A support to caam.
   - add i.MX6 support to caam"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (163 commits)
  crypto: algif_aead - fix for multiple operations on AF_ALG sockets
  crypto: qat - enable legacy VFs
  MPI: Fix mpi_read_buffer
  crypto: qat - silence a static checker warning
  crypto: vmx - Fixing opcode issue
  crypto: caam - Use the preferred style for memory allocations
  crypto: caam - Propagate the real error code in caam_probe
  crypto: caam - Fix the error handling in caam_probe
  crypto: caam - fix writing to JQCR_MS when using service interface
  crypto: hash - Add AHASH_REQUEST_ON_STACK
  crypto: testmgr - Use new skcipher interface
  crypto: skcipher - Add top-level skcipher interface
  crypto: cmac - allow usage in FIPS mode
  crypto: sahara - Use dmam_alloc_coherent
  crypto: caam - Add support for LS1021A
  crypto: qat - Don't move data inside output buffer
  crypto: vmx - Fixing GHASH Key issue on little endian
  crypto: vmx - Fixing AES-CTR counter bug
  crypto: null - Add missing Kconfig tristate for NULL2
  crypto: nx - Add forward declaration for struct crypto_aead
  ...
Committed by Linus Torvalds on 2015-08-31 17:38:39 -07:00

154 changed files with 15742 additions and 7446 deletions

include/crypto/aead.h

@@ -1,7 +1,7 @@
/*
* AEAD: Authenticated Encryption with Associated Data
*
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -45,16 +45,40 @@
* a breach in the integrity of the message. In essence, that -EBADMSG error
* code is the key bonus an AEAD cipher has over "standard" block chaining
* modes.
*
* Memory Structure:
*
* To support the needs of the most prominent user of AEAD ciphers, namely
* IPSEC, the AEAD ciphers have a special memory layout the caller must adhere
* to.
*
* The scatter list pointing to the input data must contain:
*
* * for RFC4106 ciphers, the concatenation of
* associated authentication data || IV || plaintext or ciphertext. Note, the
* same IV (buffer) is also set with the aead_request_set_crypt call. Note,
* the API call of aead_request_set_ad must provide the length of the AAD and
* the IV. The API call of aead_request_set_crypt only points to the size of
* the input plaintext or ciphertext.
*
* * for "normal" AEAD ciphers, the concatenation of
* associated authentication data || plaintext or ciphertext.
*
* It is important to note that if multiple scatter gather list entries form
* the input data mentioned above, the first entry must not point to a NULL
* buffer. If there is any chance that the AAD buffer can be NULL, the calling
* code must ensure that the first scatter gather list entry does not end up
* pointing to a NULL buffer.
*/
struct crypto_aead;
/**
* struct aead_request - AEAD request
* @base: Common attributes for async crypto requests
* @old: Boolean whether the old or new AEAD API is used
* @assoclen: Length in bytes of associated data for authentication
* @cryptlen: Length of data to be encrypted or decrypted
* @iv: Initialisation vector
* @assoc: Associated data
* @src: Source data
* @dst: Destination data
* @__ctx: Start of private context data
@@ -62,33 +86,17 @@
struct aead_request {
struct crypto_async_request base;
bool old;
unsigned int assoclen;
unsigned int cryptlen;
u8 *iv;
struct scatterlist *assoc;
struct scatterlist *src;
struct scatterlist *dst;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
/**
* struct aead_givcrypt_request - AEAD request with IV generation
* @seq: Sequence number for IV generation
* @giv: Space for generated IV
* @areq: The AEAD request itself
*/
struct aead_givcrypt_request {
u64 seq;
u8 *giv;
struct aead_request areq;
};
/**
* struct aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
@@ -141,16 +149,6 @@ struct aead_alg {
};
struct crypto_aead {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
int (*encrypt)(struct aead_request *req);
int (*decrypt)(struct aead_request *req);
int (*givencrypt)(struct aead_givcrypt_request *req);
int (*givdecrypt)(struct aead_givcrypt_request *req);
struct crypto_aead *child;
unsigned int authsize;
unsigned int reqsize;
@@ -192,16 +190,6 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm)
{
return tfm;
}
static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm)
{
return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
}
static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
{
return container_of(crypto_aead_tfm(tfm)->__crt_alg,
@@ -210,8 +198,7 @@ static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
{
return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize :
alg->ivsize;
return alg->ivsize;
}
/**
@@ -337,7 +324,7 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
*/
static inline int crypto_aead_encrypt(struct aead_request *req)
{
return crypto_aead_reqtfm(req)->encrypt(req);
return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
}
/**
@@ -364,10 +351,12 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
*/
static inline int crypto_aead_decrypt(struct aead_request *req)
{
if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
struct crypto_aead *aead = crypto_aead_reqtfm(req);
if (req->cryptlen < crypto_aead_authsize(aead))
return -EINVAL;
return crypto_aead_reqtfm(req)->decrypt(req);
return crypto_aead_alg(aead)->decrypt(req);
}
/**
@@ -387,7 +376,10 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
*
* Return: number of bytes
*/
unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
{
return tfm->reqsize;
}
/**
* aead_request_set_tfm() - update cipher handle reference in request
@@ -400,7 +392,7 @@ unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
static inline void aead_request_set_tfm(struct aead_request *req,
struct crypto_aead *tfm)
{
req->base.tfm = crypto_aead_tfm(tfm->child);
req->base.tfm = crypto_aead_tfm(tfm);
}
/**
@@ -525,23 +517,6 @@ static inline void aead_request_set_crypt(struct aead_request *req,
req->iv = iv;
}
/**
* aead_request_set_assoc() - set the associated data scatter / gather list
* @req: request handle
* @assoc: associated data scatter / gather list
* @assoclen: number of bytes to process from @assoc
*
* Obsolete, do not use.
*/
static inline void aead_request_set_assoc(struct aead_request *req,
struct scatterlist *assoc,
unsigned int assoclen)
{
req->assoc = assoc;
req->assoclen = assoclen;
req->old = true;
}
/**
* aead_request_set_ad - set associated data information
* @req: request handle
@@ -554,77 +529,6 @@ static inline void aead_request_set_ad(struct aead_request *req,
unsigned int assoclen)
{
req->assoclen = assoclen;
req->old = false;
}
static inline struct crypto_aead *aead_givcrypt_reqtfm(
struct aead_givcrypt_request *req)
{
return crypto_aead_reqtfm(&req->areq);
}
static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
{
return aead_givcrypt_reqtfm(req)->givencrypt(req);
};
static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
{
return aead_givcrypt_reqtfm(req)->givdecrypt(req);
};
static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
struct crypto_aead *tfm)
{
req->areq.base.tfm = crypto_aead_tfm(tfm);
}
static inline struct aead_givcrypt_request *aead_givcrypt_alloc(
struct crypto_aead *tfm, gfp_t gfp)
{
struct aead_givcrypt_request *req;
req = kmalloc(sizeof(struct aead_givcrypt_request) +
crypto_aead_reqsize(tfm), gfp);
if (likely(req))
aead_givcrypt_set_tfm(req, tfm);
return req;
}
static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
{
kfree(req);
}
static inline void aead_givcrypt_set_callback(
struct aead_givcrypt_request *req, u32 flags,
crypto_completion_t compl, void *data)
{
aead_request_set_callback(&req->areq, flags, compl, data);
}
static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
struct scatterlist *src,
struct scatterlist *dst,
unsigned int nbytes, void *iv)
{
aead_request_set_crypt(&req->areq, src, dst, nbytes, iv);
}
static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req,
struct scatterlist *assoc,
unsigned int assoclen)
{
aead_request_set_assoc(&req->areq, assoc, assoclen);
}
static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req,
u8 *giv, u64 seq)
{
req->giv = giv;
req->seq = seq;
}
#endif /* _CRYPTO_AEAD_H */
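
To make the memory layout and the aead_request_set_ad()/aead_request_set_crypt() split documented above concrete, here is a hedged usage sketch. It is not part of the patch: the gcm(aes) transform, the helper name and the buffer layout are illustrative, and asynchronous completion handling is omitted for brevity.

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: in-place encryption with a "normal" AEAD (gcm(aes)).
 * buf holds assoclen bytes of AAD, then ptlen bytes of plaintext, then
 * 16 spare bytes that receive the authentication tag.
 */
static int example_gcm_encrypt(u8 *buf, unsigned int assoclen,
			       unsigned int ptlen,
			       const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	/* One entry covering AAD || plaintext || tag space; never NULL. */
	sg_init_one(&sg, buf, assoclen + ptlen + 16);

	aead_request_set_callback(req, 0, NULL, NULL);	/* async handling omitted */
	aead_request_set_ad(req, assoclen);
	/* cryptlen covers the plaintext only; the tag is appended on encrypt. */
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	err = crypto_aead_encrypt(req);
	/* crypto_aead_decrypt() on the result would return -EBADMSG if the
	 * authentication tag did not verify. */

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}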

include/crypto/algapi.h

@@ -18,6 +18,7 @@
#include <linux/skbuff.h>
struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;
@@ -30,6 +31,7 @@ struct crypto_type {
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
void (*free)(struct crypto_instance *inst);
unsigned int type;
unsigned int maskclear;
@@ -180,7 +182,6 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
struct crypto_async_request *request);
void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);

include/crypto/chacha20.h (new file, 25 lines)

@@ -0,0 +1,25 @@
/*
* Common values for the ChaCha20 algorithm
*/
#ifndef _CRYPTO_CHACHA20_H
#define _CRYPTO_CHACHA20_H
#include <linux/types.h>
#include <linux/crypto.h>
#define CHACHA20_IV_SIZE 16
#define CHACHA20_KEY_SIZE 32
#define CHACHA20_BLOCK_SIZE 64
struct chacha20_ctx {
u32 key[8];
};
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keysize);
int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes);
#endif
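
The prototypes above take a struct blkcipher_desc, so at this point chacha20 is driven through the synchronous blkcipher interface rather than the new skcipher one. A minimal, illustrative sketch (not from the patch; the helper name and buffers are made up):

#include <crypto/chacha20.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: in-place ChaCha20 encryption via the blkcipher API. */
static int example_chacha20_crypt(const u8 *key, const u8 *iv,
				  u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_blkcipher("chacha20", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, CHACHA20_KEY_SIZE);
	if (err)
		goto out;

	/* iv is CHACHA20_IV_SIZE bytes: the initial counter/nonce words. */
	crypto_blkcipher_set_iv(tfm, iv, CHACHA20_IV_SIZE);

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, buf, len);
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
	crypto_free_blkcipher(tfm);
	return err;
}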

include/crypto/hash.h

@@ -63,6 +63,11 @@ struct ahash_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
#define AHASH_REQUEST_ON_STACK(name, ahash) \
char __##name##_desc[sizeof(struct ahash_request) + \
crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
struct ahash_request *name = (void *)__##name##_desc
/**
* struct ahash_alg - asynchronous message digest definition
* @init: Initialize the transformation context. Intended only to initialize the

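AHASH_REQUEST_ON_STACK above places the ahash request on the stack instead of the heap, which is only safe with synchronous implementations (nothing has to stay alive after the call returns). A hedged sketch of the intended usage, assuming a transform allocated with CRYPTO_ALG_ASYNC masked out; the algorithm name and helper are illustrative, not from the patch:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: one-shot digest using a stack-allocated request. */
static int example_sha256_digest(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	int err;

	/* Mask out CRYPTO_ALG_ASYNC so only synchronous implementations match. */
	tfm = crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		AHASH_REQUEST_ON_STACK(req, tfm);

		ahash_request_set_tfm(req, tfm);
		ahash_request_set_callback(req, 0, NULL, NULL);

		sg_init_one(&sg, data, len);
		/* out must provide crypto_ahash_digestsize(tfm) bytes. */
		ahash_request_set_crypt(req, &sg, out, len);

		err = crypto_ahash_digest(req);
	}

	crypto_free_ahash(tfm);
	return err;
}
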
include/crypto/internal/aead.h

@@ -1,7 +1,7 @@
/*
* AEAD: Authenticated Encryption with Associated Data
*
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -21,6 +21,7 @@
struct rtattr;
struct aead_instance {
void (*free)(struct aead_instance *inst);
union {
struct {
char head[offsetof(struct aead_alg, base)];
@@ -34,20 +35,15 @@ struct crypto_aead_spawn {
struct crypto_spawn base;
};
extern const struct crypto_type crypto_aead_type;
extern const struct crypto_type crypto_nivaead_type;
struct aead_queue {
struct crypto_queue base;
};
static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
static inline struct crypto_instance *crypto_aead_alg_instance(
struct crypto_aead *aead)
{
return crypto_tfm_alg_instance(&aead->base);
}
static inline struct crypto_instance *aead_crypto_instance(
struct aead_instance *inst)
{
@@ -61,7 +57,7 @@ static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
{
return aead_instance(crypto_aead_alg_instance(aead));
return aead_instance(crypto_tfm_alg_instance(&aead->base));
}
static inline void *aead_instance_ctx(struct aead_instance *inst)
@@ -90,8 +86,6 @@ static inline void crypto_set_aead_spawn(
crypto_set_spawn(&spawn->base, inst);
}
struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
u32 type, u32 mask);
@@ -100,12 +94,6 @@ static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
crypto_drop_spawn(&spawn->base);
}
static inline struct crypto_alg *crypto_aead_spawn_alg(
struct crypto_aead_spawn *spawn)
{
return spawn->base.alg;
}
static inline struct aead_alg *crypto_spawn_aead_alg(
struct crypto_aead_spawn *spawn)
{
@@ -118,38 +106,15 @@ static inline struct crypto_aead *crypto_spawn_aead(
return crypto_spawn_tfm2(&spawn->base);
}
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
struct rtattr **tb, u32 type, u32 mask);
void aead_geniv_free(struct aead_instance *inst);
int aead_geniv_init(struct crypto_tfm *tfm);
void aead_geniv_exit(struct crypto_tfm *tfm);
static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
{
return geniv->child;
}
static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
{
return aead_request_ctx(&req->areq);
}
static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req,
int err)
{
aead_request_complete(&req->areq, err);
}
static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
unsigned int reqsize)
{
crypto_aead_crt(aead)->reqsize = reqsize;
aead->reqsize = reqsize;
}
static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
{
return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize :
alg->maxauthsize;
return alg->maxauthsize;
}
static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
@@ -157,6 +122,37 @@ static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
}
static inline void aead_init_queue(struct aead_queue *queue,
unsigned int max_qlen)
{
crypto_init_queue(&queue->base, max_qlen);
}
static inline int aead_enqueue_request(struct aead_queue *queue,
struct aead_request *request)
{
return crypto_enqueue_request(&queue->base, &request->base);
}
static inline struct aead_request *aead_dequeue_request(
struct aead_queue *queue)
{
struct crypto_async_request *req;
req = crypto_dequeue_request(&queue->base);
return req ? container_of(req, struct aead_request, base) : NULL;
}
static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
{
struct crypto_async_request *req;
req = crypto_get_backlog(&queue->base);
return req ? container_of(req, struct aead_request, base) : NULL;
}
int crypto_register_aead(struct aead_alg *alg);
void crypto_unregister_aead(struct aead_alg *alg);
int crypto_register_aeads(struct aead_alg *algs, int count);
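
The aead_queue helpers above wrap the generic crypto_queue for request batching inside a driver. A hedged sketch of how a driver's worker might drain such a queue (not part of the patch; the helper name and locking are illustrative):

#include <crypto/internal/aead.h>
#include <linux/spinlock.h>

/* Illustrative only: drain an aead_queue from a driver's worker context. */
static void example_drain_aead_queue(struct aead_queue *queue, spinlock_t *lock)
{
	struct aead_request *req, *backlog;

	for (;;) {
		spin_lock_bh(lock);
		backlog = aead_get_backlog(queue);
		req = aead_dequeue_request(queue);
		spin_unlock_bh(lock);

		/* A backlogged request is now being started. */
		if (backlog)
			aead_request_complete(backlog, -EINPROGRESS);

		if (!req)
			break;

		/* ... process req on the hardware, then report the result ... */
		aead_request_complete(req, 0 /* or an error code */);
	}
}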

include/crypto/internal/geniv.h

@@ -15,10 +15,19 @@
#include <crypto/internal/aead.h>
#include <linux/spinlock.h>
#include <linux/types.h>
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
struct crypto_blkcipher *null;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
struct rtattr **tb, u32 type, u32 mask);
void aead_geniv_free(struct aead_instance *inst);
int aead_init_geniv(struct crypto_aead *tfm);
void aead_exit_geniv(struct crypto_aead *tfm);
#endif /* _CRYPTO_INTERNAL_GENIV_H */

include/crypto/internal/skcipher.h

@@ -107,5 +107,20 @@ static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
return req->base.flags;
}
static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
return req->__ctx;
}
static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
return req->base.flags;
}
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */

include/crypto/poly1305.h (new file, 41 lines)

@@ -0,0 +1,41 @@
/*
* Common values for the Poly1305 algorithm
*/
#ifndef _CRYPTO_POLY1305_H
#define _CRYPTO_POLY1305_H
#include <linux/types.h>
#include <linux/crypto.h>
#define POLY1305_BLOCK_SIZE 16
#define POLY1305_KEY_SIZE 32
#define POLY1305_DIGEST_SIZE 16
struct poly1305_desc_ctx {
/* key */
u32 r[5];
/* finalize key */
u32 s[4];
/* accumulator */
u32 h[5];
/* partial buffer */
u8 buf[POLY1305_BLOCK_SIZE];
/* bytes used in partial buffer */
unsigned int buflen;
/* r key has been set */
bool rset;
/* s key has been set */
bool sset;
};
int crypto_poly1305_init(struct shash_desc *desc);
int crypto_poly1305_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen);
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen);
int crypto_poly1305_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen);
int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
#endif

include/crypto/skcipher.h

@@ -1,7 +1,7 @@
/*
* Symmetric key ciphers.
*
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -17,6 +17,28 @@
#include <linux/kernel.h>
#include <linux/slab.h>
/**
* struct skcipher_request - Symmetric key cipher request
* @cryptlen: Number of bytes to encrypt or decrypt
* @iv: Initialisation Vector
* @src: Source SG list
* @dst: Destination SG list
* @base: Underlying async request request
* @__ctx: Start of private context data
*/
struct skcipher_request {
unsigned int cryptlen;
u8 *iv;
struct scatterlist *src;
struct scatterlist *dst;
struct crypto_async_request base;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
/**
* struct skcipher_givcrypt_request - Crypto request with IV generation
* @seq: Sequence number for IV generation
@@ -30,6 +52,23 @@ struct skcipher_givcrypt_request {
struct ablkcipher_request creq;
};
struct crypto_skcipher {
int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
unsigned int ivsize;
unsigned int reqsize;
struct crypto_tfm base;
};
#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
struct skcipher_request *name = (void *)__##name##_desc
static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
struct skcipher_givcrypt_request *req)
{
@@ -106,5 +145,355 @@ static inline void skcipher_givcrypt_set_giv(
req->seq = seq;
}
/**
* DOC: Symmetric Key Cipher API
*
* Symmetric key cipher API is used with the ciphers of type
* CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
*
* Asynchronous cipher operations imply that the function invocation for a
* cipher request returns immediately before the completion of the operation.
* The cipher request is scheduled as a separate kernel thread and therefore
* load-balanced on the different CPUs via the process scheduler. To allow
* the kernel crypto API to inform the caller about the completion of a cipher
* request, the caller must provide a callback function. That function is
* invoked with the cipher handle when the request completes.
*
* To support the asynchronous operation, more information than just the
* cipher handle must be supplied to the kernel crypto API. That additional
* information is given by filling in the skcipher_request data structure.
*
* For the symmetric key cipher API, the state is maintained with the tfm
* cipher handle. A single tfm can be used across multiple calls and in
* parallel. For asynchronous block cipher calls, context data supplied and
* only used by the caller can be referenced in the request data structure in
* addition to the IV used for the cipher request. The maintenance of such
* state information would be important for a crypto driver implementer to
* have, because when calling the callback function upon completion of the
* cipher operation, that callback function may need some information about
* which operation just finished if it invoked multiple requests in parallel. This
* state information is unused by the kernel crypto API.
*/
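
Because crypto_skcipher_encrypt()/decrypt() may return -EINPROGRESS or -EBUSY for asynchronous implementations, callers that need the result in-line conventionally pair the request callback with a completion. The following sketch illustrates that pattern; it is not part of this header, the names are made up, and it assumes CRYPTO_TFM_REQ_MAY_BACKLOG is set so that -EBUSY means the request was queued rather than rejected.

#include <linux/completion.h>

/* Illustrative only: completion-based waiting for an async cipher request. */
struct example_cipher_wait {
	struct completion done;
	int err;
};

static void example_cipher_done(struct crypto_async_request *req, int err)
{
	struct example_cipher_wait *wait = req->data;

	/* -EINPROGRESS here only signals that a backlogged request was started. */
	if (err == -EINPROGRESS)
		return;

	wait->err = err;
	complete(&wait->done);
}

static int example_cipher_wait_for(struct example_cipher_wait *wait, int rc)
{
	if (rc == -EINPROGRESS || rc == -EBUSY) {
		wait_for_completion(&wait->done);
		rc = wait->err;
	}
	return rc;
}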
static inline struct crypto_skcipher *__crypto_skcipher_cast(
struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_skcipher, base);
}
/**
* crypto_alloc_skcipher() - allocate symmetric key cipher handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher cipher
* @type: specifies the type of the cipher
* @mask: specifies the mask for the cipher
*
* Allocate a cipher handle for an skcipher. The returned struct
* crypto_skcipher is the cipher handle that is required for any subsequent
* API invocation for that skcipher.
*
* Return: allocated cipher handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
u32 type, u32 mask);
static inline struct crypto_tfm *crypto_skcipher_tfm(
struct crypto_skcipher *tfm)
{
return &tfm->base;
}
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
*/
static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
{
crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
}
/**
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher
* @type: specifies the type of the cipher
* @mask: specifies the mask for the cipher
*
* Return: true when the skcipher is known to the kernel crypto API; false
* otherwise
*/
static inline int crypto_has_skcipher(const char *alg_name, u32 type,
u32 mask)
{
return crypto_has_alg(alg_name, crypto_skcipher_type(type),
crypto_skcipher_mask(mask));
}
/**
* crypto_skcipher_ivsize() - obtain IV size
* @tfm: cipher handle
*
* The size of the IV for the skcipher referenced by the cipher handle is
* returned. This IV size may be zero if the cipher does not need an IV.
*
* Return: IV size in bytes
*/
static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
{
return tfm->ivsize;
}
/**
* crypto_skcipher_blocksize() - obtain block size of cipher
* @tfm: cipher handle
*
* The block size for the skcipher referenced with the cipher handle is
* returned. The caller may use that information to allocate appropriate
* memory for the data returned by the encryption or decryption operation
*
* Return: block size of cipher
*/
static inline unsigned int crypto_skcipher_blocksize(
struct crypto_skcipher *tfm)
{
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
static inline unsigned int crypto_skcipher_alignmask(
struct crypto_skcipher *tfm)
{
return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
}
static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
{
return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
}
static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
u32 flags)
{
crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
}
static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
u32 flags)
{
crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
}
/**
* crypto_skcipher_setkey() - set key for cipher
* @tfm: cipher handle
* @key: buffer holding the key
* @keylen: length of the key in bytes
*
* The caller provided key is set for the skcipher referenced by the cipher
* handle.
*
* Note, the key length determines the cipher variant. Many block ciphers come in
* several variants depending on the key size, such as AES-128 vs. AES-192
* vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
* is performed.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return tfm->setkey(tfm, key, keylen);
}
/**
* crypto_skcipher_reqtfm() - obtain cipher handle from request
* @req: skcipher_request out of which the cipher handle is to be obtained
*
* Return the crypto_skcipher handle when furnishing an skcipher_request
* data structure.
*
* Return: crypto_skcipher handle
*/
static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
struct skcipher_request *req)
{
return __crypto_skcipher_cast(req->base.tfm);
}
/**
* crypto_skcipher_encrypt() - encrypt plaintext
* @req: reference to the skcipher_request handle that holds all information
* needed to perform the cipher operation
*
* Encrypt plaintext data using the skcipher_request handle. That data
* structure and how it is filled with data is discussed with the
* skcipher_request_* functions.
*
* Return: 0 if the cipher operation was successful; < 0 if an error occurred
*/
static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
return tfm->encrypt(req);
}
/**
* crypto_skcipher_decrypt() - decrypt ciphertext
* @req: reference to the skcipher_request handle that holds all information
* needed to perform the cipher operation
*
* Decrypt ciphertext data using the skcipher_request handle. That data
* structure and how it is filled with data is discussed with the
* skcipher_request_* functions.
*
* Return: 0 if the cipher operation was successful; < 0 if an error occurred
*/
static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
return tfm->decrypt(req);
}
/**
* DOC: Symmetric Key Cipher Request Handle
*
* The skcipher_request data structure contains all pointers to data
* required for the symmetric key cipher operation. This includes the cipher
* handle (which can be used by multiple skcipher_request instances), pointer
* to plaintext and ciphertext, asynchronous callback function, etc. It acts
* as a handle to the skcipher_request_* API calls in a similar way as
* skcipher handle to the crypto_skcipher_* API calls.
*/
/**
* crypto_skcipher_reqsize() - obtain size of the request data structure
* @tfm: cipher handle
*
* Return: number of bytes
*/
static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
{
return tfm->reqsize;
}
/**
* skcipher_request_set_tfm() - update cipher handle reference in request
* @req: request handle to be modified
* @tfm: cipher handle that shall be added to the request handle
*
* Allow the caller to replace the existing skcipher handle in the request
* data structure with a different one.
*/
static inline void skcipher_request_set_tfm(struct skcipher_request *req,
struct crypto_skcipher *tfm)
{
req->base.tfm = crypto_skcipher_tfm(tfm);
}
static inline struct skcipher_request *skcipher_request_cast(
struct crypto_async_request *req)
{
return container_of(req, struct skcipher_request, base);
}
/**
* skcipher_request_alloc() - allocate request data structure
* @tfm: cipher handle to be registered with the request
* @gfp: memory allocation flag that is handed to kmalloc by the API call.
*
* Allocate the request data structure that must be used with the skcipher
* encrypt and decrypt API calls. During the allocation, the provided skcipher
* handle is registered in the request data structure.
*
* Return: allocated request handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
static inline struct skcipher_request *skcipher_request_alloc(
struct crypto_skcipher *tfm, gfp_t gfp)
{
struct skcipher_request *req;
req = kmalloc(sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(tfm), gfp);
if (likely(req))
skcipher_request_set_tfm(req, tfm);
return req;
}
/**
* skcipher_request_free() - zeroize and free request data structure
* @req: request data structure cipher handle to be freed
*/
static inline void skcipher_request_free(struct skcipher_request *req)
{
kzfree(req);
}
/**
* skcipher_request_set_callback() - set asynchronous callback function
* @req: request handle
* @flags: specify zero or an ORing of the flags
* CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
* increase the wait queue beyond the initial maximum size;
* CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
* @compl: callback function pointer to be registered with the request handle
* @data: The data pointer refers to memory that is not used by the kernel
* crypto API, but provided to the callback function for it to use. Here,
* the caller can provide a reference to memory the callback function can
* operate on. As the callback function is invoked asynchronously to the
* related functionality, it may need to access data structures of the
* related functionality which can be referenced using this pointer. The
* callback function can access the memory via the "data" field in the
* crypto_async_request data structure provided to the callback function.
*
* This function allows setting the callback function that is triggered once the
* cipher operation completes.
*
* The callback function is registered with the skcipher_request handle and
* must comply with the following template
*
* void callback_function(struct crypto_async_request *req, int error)
*/
static inline void skcipher_request_set_callback(struct skcipher_request *req,
u32 flags,
crypto_completion_t compl,
void *data)
{
req->base.complete = compl;
req->base.data = data;
req->base.flags = flags;
}
/**
* skcipher_request_set_crypt() - set data buffers
* @req: request handle
* @src: source scatter / gather list
* @dst: destination scatter / gather list
* @cryptlen: number of bytes to process from @src
* @iv: IV for the cipher operation which must comply with the IV size defined
* by crypto_skcipher_ivsize
*
* This function allows setting of the source data and destination data
* scatter / gather lists.
*
* For encryption, the source is treated as the plaintext and the
* destination is the ciphertext. For a decryption operation, the use is
* reversed - the source is the ciphertext and the destination is the plaintext.
*/
static inline void skcipher_request_set_crypt(
struct skcipher_request *req,
struct scatterlist *src, struct scatterlist *dst,
unsigned int cryptlen, void *iv)
{
req->src = src;
req->dst = dst;
req->cryptlen = cryptlen;
req->iv = iv;
}
#endif /* _CRYPTO_SKCIPHER_H */
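
Putting the pieces of this header together, a caller would drive the new top-level skcipher interface roughly as follows. This is a sketch only: the cbc(aes) transform, helper name and buffers are illustrative, and it reuses the example_cipher_wait completion helpers sketched earlier for asynchronous completion.

#include <crypto/skcipher.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: in-place CBC-AES encryption with an on-stack request. */
static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   u8 *iv, u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct example_cipher_wait wait;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	{
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		init_completion(&wait.done);
		skcipher_request_set_tfm(req, tfm);
		/* MAY_BACKLOG: -EBUSY only means the request was queued. */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      example_cipher_done, &wait);

		sg_init_one(&sg, buf, len);	/* len: multiple of the block size */
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		err = example_cipher_wait_for(&wait,
					      crypto_skcipher_encrypt(req));
	}
out:
	crypto_free_skcipher(tfm);
	return err;
}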

include/dt-bindings/clock/imx6qdl-clock.h

@@ -251,6 +251,9 @@
#define IMX6QDL_CLK_VIDEO_27M 238
#define IMX6QDL_CLK_MIPI_CORE_CFG 239
#define IMX6QDL_CLK_MIPI_IPG 240
#define IMX6QDL_CLK_END 241
#define IMX6QDL_CLK_CAAM_MEM 241
#define IMX6QDL_CLK_CAAM_ACLK 242
#define IMX6QDL_CLK_CAAM_IPG 243
#define IMX6QDL_CLK_END 244
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */

include/linux/crypto.h

@@ -101,12 +101,6 @@
*/
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
* Temporary flag used to prevent legacy AEAD implementations from
* being used by user-space.
*/
#define CRYPTO_ALG_AEAD_NEW 0x00004000
/*
* Transform masks and values (for crt_flags).
*/
@@ -142,13 +136,10 @@
struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_tfm;
struct crypto_type;
struct aead_request;
struct aead_givcrypt_request;
struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -274,47 +265,6 @@ struct ablkcipher_alg {
unsigned int ivsize;
};
/**
* struct old_aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
* transformation. A transformation may support smaller tag sizes.
* As the authentication tag is a message digest to ensure the
* integrity of the encrypted data, a consumer typically wants the
* largest authentication tag possible as defined by this
* variable.
* @setauthsize: Set authentication size for the AEAD transformation. This
* function is used to specify the consumer requested size of the
* authentication tag to be either generated by the transformation
* during encryption or the size of the authentication tag to be
* supplied during the decryption operation. This function is also
* responsible for checking the authentication tag size for
* validity.
* @setkey: see struct ablkcipher_alg
* @encrypt: see struct ablkcipher_alg
* @decrypt: see struct ablkcipher_alg
* @givencrypt: see struct ablkcipher_alg
* @givdecrypt: see struct ablkcipher_alg
* @geniv: see struct ablkcipher_alg
* @ivsize: see struct ablkcipher_alg
*
* All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
* mandatory and must be filled.
*/
struct old_aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
int (*encrypt)(struct aead_request *req);
int (*decrypt)(struct aead_request *req);
int (*givencrypt)(struct aead_givcrypt_request *req);
int (*givdecrypt)(struct aead_givcrypt_request *req);
const char *geniv;
unsigned int ivsize;
unsigned int maxauthsize;
};
/**
* struct blkcipher_alg - synchronous block cipher definition
* @min_keysize: see struct ablkcipher_alg
@@ -409,7 +359,6 @@ struct compress_alg {
#define cra_ablkcipher cra_u.ablkcipher
#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
@@ -460,7 +409,7 @@ struct compress_alg {
* struct crypto_type, which implements callbacks common for all
* transformation types. There are multiple options:
* &crypto_blkcipher_type, &crypto_ablkcipher_type,
* &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
* &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
* callbacks. This is the case for: cipher, compress, shash.
* @cra_u: Callbacks implementing the transformation. This is a union of
@@ -508,7 +457,6 @@ struct crypto_alg {
union {
struct ablkcipher_alg ablkcipher;
struct old_aead_alg aead;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;