Creation of Cybook 2416 (actually Gen4) repository

drivers/crypto/Kconfig | 69 lines (new file)
@@ -0,0 +1,69 @@
menu "Hardware crypto devices"

config CRYPTO_DEV_PADLOCK
	tristate "Support for VIA PadLock ACE"
	depends on X86_32
	select CRYPTO_ALGAPI
	default m
	help
	  Some VIA processors come with an integrated crypto engine
	  (so-called VIA PadLock ACE, Advanced Cryptography Engine)
	  that provides instructions for very fast cryptographic
	  operations with supported algorithms.

	  The instructions are used only when the CPU supports them.
	  Otherwise software encryption is used.

	  Selecting M for this option will compile a helper module
	  padlock.ko that should autoload all below configured
	  algorithms. Don't worry if your hardware does not support
	  some or all of them. In such a case padlock.ko will
	  simply write a single line into the kernel log informing
	  about its failure, but everything will keep working fine.

	  If you are unsure, say M. The compiled module will be
	  called padlock.ko

config CRYPTO_DEV_PADLOCK_AES
	tristate "PadLock driver for AES algorithm"
	depends on CRYPTO_DEV_PADLOCK
	select CRYPTO_BLKCIPHER
	default m
	help
	  Use VIA PadLock for the AES algorithm.

	  Available in VIA C3 and newer CPUs.

	  If unsure, say M. The compiled module will be
	  called padlock-aes.ko

config CRYPTO_DEV_PADLOCK_SHA
	tristate "PadLock driver for SHA1 and SHA256 algorithms"
	depends on CRYPTO_DEV_PADLOCK
	select CRYPTO_SHA1
	select CRYPTO_SHA256
	default m
	help
	  Use VIA PadLock for the SHA1/SHA256 algorithms.

	  Available in VIA C7 and newer processors.

	  If unsure, say M. The compiled module will be
	  called padlock-sha.ko

source "arch/s390/crypto/Kconfig"

config CRYPTO_DEV_GEODE
	tristate "Support for the Geode LX AES engine"
	depends on CRYPTO && X86_32 && PCI
	select CRYPTO_ALGAPI
	select CRYPTO_BLKCIPHER
	default m
	help
	  Say 'Y' here to use the AMD Geode LX processor on-board AES
	  engine for the CryptoAPI AES algorithm.

	  To compile this driver as a module, choose M here: the module
	  will be called geode-aes.

endmenu
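None of the code below is called by name from other kernel code; users reach it through the CryptoAPI, which selects the highest-priority implementation registered under a given algorithm name. As a rough illustration only — a minimal consumer sketch against the 2.6.19-era blkcipher interface, with a made-up module name and an all-zero key — requesting "cbc(aes)" would transparently land on one of the drivers added by this commit when the hardware is present:

#include <linux/module.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int __init cbc_demo_init(void)
{
	/* "cbc(aes)" resolves to cbc-aes-geode-128 or cbc-aes-padlock
	 * (priority 400) when one of these drivers is loaded, else to
	 * the generic software template. */
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	static u8 key[16];	/* hypothetical all-zero key */
	static u8 buf[16];	/* one AES block, encrypted in place */
	int err;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_blkcipher_setkey(tfm, key, sizeof(key));

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, buf, sizeof(buf));
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));

	/* Report which driver actually won the priority race. */
	printk(KERN_INFO "cbc(aes) via %s: %d\n",
	       crypto_tfm_alg_driver_name(crypto_blkcipher_tfm(tfm)), err);

	crypto_free_blkcipher(tfm);
	return err;
}

static void __exit cbc_demo_exit(void) { }

module_init(cbc_demo_init);
module_exit(cbc_demo_exit);
MODULE_LICENSE("GPL");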
drivers/crypto/Makefile | 4 lines (new file)
@@ -0,0 +1,4 @@
obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
drivers/crypto/geode-aes.c | 478 lines (new file)
@@ -0,0 +1,478 @@
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>

#include <asm/io.h>
#include <asm/delay.h>

#include "geode-aes.h"

/* Register definitions */

#define AES_CTRLA_REG	0x0000

#define AES_CTRL_START		0x01
#define AES_CTRL_DECRYPT	0x00
#define AES_CTRL_ENCRYPT	0x02
#define AES_CTRL_WRKEY		0x04
#define AES_CTRL_DCA		0x08
#define AES_CTRL_SCA		0x10
#define AES_CTRL_CBC		0x20

#define AES_INTR_REG	0x0008

#define AES_INTRA_PENDING (1 << 16)
#define AES_INTRB_PENDING (1 << 17)

#define AES_INTR_PENDING (AES_INTRA_PENDING | AES_INTRB_PENDING)
#define AES_INTR_MASK	0x07

#define AES_SOURCEA_REG		0x0010
#define AES_DSTA_REG		0x0014
#define AES_LENA_REG		0x0018
#define AES_WRITEKEY0_REG	0x0030
#define AES_WRITEIV0_REG	0x0040

/* A very large counter that is used to gracefully bail out of an
 * operation in case of trouble
 */

#define AES_OP_TIMEOUT 0x50000

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);

	/* Returns 0 on success, 1 if the timeout counter expired */
	return counter ? 0 : 1;
}

static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
	u32 flags = 0;
	unsigned long iflags;

	if (op->len == 0)
		return 0;

	/* If the source and destination is the same, then
	 * we need to turn on the coherent flags, otherwise
	 * we don't need to worry
	 */

	if (op->src == op->dst)
		flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (op->dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */

	spin_lock_irqsave(&lock, iflags);

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);
	}

	if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
		flags |= AES_CTRL_WRKEY;
		_writefield(AES_WRITEKEY0_REG, op->key);
	}

	do_crypt(op->src, op->dst, op->len, flags);

	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);

	spin_unlock_irqrestore(&lock, iflags);

	return op->len;
}

/* CRYPTO-API Functions */

static int
geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (len != AES_KEY_LENGTH) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(op->key, key, len);
	return 0;
}

static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if ((out == NULL) || (in == NULL))
		return;

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(op);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if ((out == NULL) || (in == NULL))
		return;

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_DECRYPT;

	geode_aes_crypt(op);
}

static struct crypto_alg geode_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "geode-aes-128",
	.cra_priority		= 300,
	.cra_alignmask		= 15,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(geode_alg.cra_list),
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_KEY_LENGTH,
			.cia_max_keysize	= AES_KEY_LENGTH,
			.cia_setkey		= geode_setkey,
			.cia_encrypt		= geode_encrypt,
			.cia_decrypt		= geode_decrypt
		}
	}
};

static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		memcpy(op->iv, walk.iv, AES_IV_LENGTH);

		ret = geode_aes_crypt(op);

		memcpy(walk.iv, op->iv, AES_IV_LENGTH);
		nbytes -= ret;

		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		memcpy(op->iv, walk.iv, AES_IV_LENGTH);

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg geode_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-geode-128",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(geode_cbc_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_KEY_LENGTH,
			.max_keysize	= AES_KEY_LENGTH,
			.setkey		= geode_setkey,
			.encrypt	= geode_cbc_encrypt,
			.decrypt	= geode_cbc_decrypt,
			.ivsize		= AES_IV_LENGTH,
		}
	}
};

static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		/* was "ret =" in the original hunk, which lost the error
		 * status returned to the caller */
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg geode_ecb_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-geode-128",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(geode_ecb_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_KEY_LENGTH,
			.max_keysize	= AES_KEY_LENGTH,
			.setkey		= geode_setkey,
			.encrypt	= geode_ecb_encrypt,
			.decrypt	= geode_ecb_decrypt,
		}
	}
};

static void
geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}

static int
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	if ((ret = pci_enable_device(dev)))
		return ret;

	if ((ret = pci_request_regions(dev, "geode-aes-128")))
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	if ((ret = crypto_register_alg(&geode_alg)))
		goto eiomap;

	if ((ret = crypto_register_alg(&geode_ecb_alg)))
		goto ealg;

	if ((ret = crypto_register_alg(&geode_cbc_alg)))
		goto eecb;

	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
	return 0;

eecb:
	crypto_unregister_alg(&geode_ecb_alg);

ealg:
	crypto_unregister_alg(&geode_alg);

eiomap:
	pci_iounmap(dev, _iobase);

erequest:
	pci_release_regions(dev);

eenable:
	pci_disable_device(dev);

	printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = __devexit_p(geode_aes_remove)
};

static int __init
geode_aes_init(void)
{
	return pci_register_driver(&geode_aes_driver);
}

static void __exit
geode_aes_exit(void)
{
	pci_unregister_driver(&geode_aes_driver);
}

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");

module_init(geode_aes_init);
module_exit(geode_aes_exit);
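Note that geode_alg above registers a plain single-block cipher ("aes", CRYPTO_ALG_TYPE_CIPHER) alongside the two blkcipher templates. A hedged sketch of how a kernel consumer would drive that single-block interface — function and buffer names are invented for illustration, and the zero key is hypothetical:

#include <linux/err.h>
#include <linux/crypto.h>

/* Encrypt exactly one 16-byte block through whatever "aes"
 * implementation currently has the highest priority
 * (geode-aes-128 at 300 outranks the generic software cipher
 * when the LX engine is present). */
static int one_block_demo(void)
{
	struct crypto_cipher *cip;
	u8 key[16] = { 0 };	/* hypothetical key */
	u8 in[16] = { 0 };
	u8 out[16];

	cip = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(cip))
		return PTR_ERR(cip);

	crypto_cipher_setkey(cip, key, sizeof(key));
	crypto_cipher_encrypt_one(cip, out, in);

	crypto_free_cipher(cip);
	return 0;
}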
drivers/crypto/geode-aes.h | 39 lines (new file)
@@ -0,0 +1,39 @@
/* Copyright (C) 2003-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#ifndef _GEODE_AES_H_
#define _GEODE_AES_H_

#define AES_KEY_LENGTH 16
#define AES_IV_LENGTH  16

#define AES_MIN_BLOCK_SIZE 16

#define AES_MODE_ECB 0
#define AES_MODE_CBC 1

#define AES_DIR_DECRYPT 0
#define AES_DIR_ENCRYPT 1

#define AES_FLAGS_HIDDENKEY (1 << 0)

struct geode_aes_op {

	void *src;
	void *dst;

	u32 mode;
	u32 dir;
	u32 flags;
	int len;

	u8 key[AES_KEY_LENGTH];
	u8 iv[AES_IV_LENGTH];
};

#endif
drivers/crypto/padlock-aes.c | 663 lines (new file)
@@ -0,0 +1,663 @@
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 * Key expansion routine taken from crypto/aes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * ---------------------------------------------------------------------------
 * Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
 * All rights reserved.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 *   1. distributions of this source code include the above copyright
 *      notice, this list of conditions and the following disclaimer;
 *
 *   2. distributions in binary form include the above copyright
 *      notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other associated materials;
 *
 *   3. the copyright holder's name is not used to endorse products
 *      built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */

#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include "padlock.h"

#define AES_MIN_KEY_SIZE	16	/* in uint8_t units */
#define AES_MAX_KEY_SIZE	32	/* ditto */
#define AES_BLOCK_SIZE		16	/* ditto */
#define AES_EXTENDED_KEY_SIZE	64	/* in uint32_t units */
#define AES_EXTENDED_KEY_SIZE_B	(AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16 byte boundaries!!! */
struct aes_ctx {
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
	int key_length;
	u32 E[AES_EXTENDED_KEY_SIZE]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_EXTENDED_KEY_SIZE]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
};

/* ====== Key management routines ====== */

static inline uint32_t
generic_rotr32(const uint32_t x, const unsigned bits)
{
	const unsigned n = bits % 32;
	return (x >> n) | (x << (32 - n));
}

static inline uint32_t
generic_rotl32(const uint32_t x, const unsigned bits)
{
	const unsigned n = bits % 32;
	return (x << n) | (x >> (32 - n));
}

#define rotl generic_rotl32
#define rotr generic_rotr32

/*
 * #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
 */
static inline uint8_t
byte(const uint32_t x, const unsigned n)
{
	return x >> (n << 3);
}

#define E_KEY ctx->E
#define D_KEY ctx->D

static uint8_t pow_tab[256];
static uint8_t log_tab[256];
static uint8_t sbx_tab[256];
static uint8_t isb_tab[256];
static uint32_t rco_tab[10];
static uint32_t ft_tab[4][256];
static uint32_t it_tab[4][256];

static uint32_t fl_tab[4][256];
static uint32_t il_tab[4][256];

static inline uint8_t
f_mult(uint8_t a, uint8_t b)
{
	uint8_t aa = log_tab[a], cc = aa + log_tab[b];

	return pow_tab[cc + (cc < aa ? 1 : 0)];
}

#define ff_mult(a, b)	(a && b ? f_mult(a, b) : 0)

#define f_rn(bo, bi, n, k)					\
	bo[n] = ft_tab[0][byte(bi[n], 0)] ^			\
		ft_tab[1][byte(bi[(n + 1) & 3], 1)] ^		\
		ft_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
		ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n)

#define i_rn(bo, bi, n, k)					\
	bo[n] = it_tab[0][byte(bi[n], 0)] ^			\
		it_tab[1][byte(bi[(n + 3) & 3], 1)] ^		\
		it_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
		it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n)

#define ls_box(x)				\
	(fl_tab[0][byte(x, 0)] ^		\
	 fl_tab[1][byte(x, 1)] ^		\
	 fl_tab[2][byte(x, 2)] ^		\
	 fl_tab[3][byte(x, 3)])

#define f_rl(bo, bi, n, k)					\
	bo[n] = fl_tab[0][byte(bi[n], 0)] ^			\
		fl_tab[1][byte(bi[(n + 1) & 3], 1)] ^		\
		fl_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
		fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n)

#define i_rl(bo, bi, n, k)					\
	bo[n] = il_tab[0][byte(bi[n], 0)] ^			\
		il_tab[1][byte(bi[(n + 3) & 3], 1)] ^		\
		il_tab[2][byte(bi[(n + 2) & 3], 2)] ^		\
		il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n)

static void
gen_tabs(void)
{
	uint32_t i, t;
	uint8_t p, q;

	/* log and power tables for GF(2**8) finite field with
	   0x011b as modular polynomial - the simplest primitive
	   root is 0x03, used here to generate the tables */

	for (i = 0, p = 1; i < 256; ++i) {
		pow_tab[i] = (uint8_t) p;
		log_tab[p] = (uint8_t) i;

		p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
	}

	log_tab[1] = 0;

	for (i = 0, p = 1; i < 10; ++i) {
		rco_tab[i] = p;

		p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
	}

	for (i = 0; i < 256; ++i) {
		p = (i ? pow_tab[255 - log_tab[i]] : 0);
		q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
		p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
		sbx_tab[i] = p;
		isb_tab[p] = (uint8_t) i;
	}

	for (i = 0; i < 256; ++i) {
		p = sbx_tab[i];

		t = p;
		fl_tab[0][i] = t;
		fl_tab[1][i] = rotl(t, 8);
		fl_tab[2][i] = rotl(t, 16);
		fl_tab[3][i] = rotl(t, 24);

		t = ((uint32_t) ff_mult(2, p)) |
		    ((uint32_t) p << 8) |
		    ((uint32_t) p << 16) | ((uint32_t) ff_mult(3, p) << 24);

		ft_tab[0][i] = t;
		ft_tab[1][i] = rotl(t, 8);
		ft_tab[2][i] = rotl(t, 16);
		ft_tab[3][i] = rotl(t, 24);

		p = isb_tab[i];

		t = p;
		il_tab[0][i] = t;
		il_tab[1][i] = rotl(t, 8);
		il_tab[2][i] = rotl(t, 16);
		il_tab[3][i] = rotl(t, 24);

		t = ((uint32_t) ff_mult(14, p)) |
		    ((uint32_t) ff_mult(9, p) << 8) |
		    ((uint32_t) ff_mult(13, p) << 16) |
		    ((uint32_t) ff_mult(11, p) << 24);

		it_tab[0][i] = t;
		it_tab[1][i] = rotl(t, 8);
		it_tab[2][i] = rotl(t, 16);
		it_tab[3][i] = rotl(t, 24);
	}
}
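The f_mult()/gen_tabs() pair above implements GF(2^8) multiplication by table lookup: pow_tab holds the powers of 0x03, log_tab the inverse map, and a carry correction folds the exponent sum back modulo 255. A standalone userspace sketch — not part of this commit — that rebuilds the two tables the same way and checks the worked example {57} x {83} = {c1} from FIPS-197:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t pow_tab[256], log_tab[256];

/* Same construction as gen_tabs(): pow_tab[i] = 3^i in GF(2^8)
 * reduced by the AES polynomial 0x11b. */
static void build_tabs(void)
{
	uint8_t p = 1;
	int i;

	for (i = 0; i < 256; ++i) {
		pow_tab[i] = p;
		log_tab[p] = (uint8_t) i;
		p ^= (p << 1) ^ (p & 0x80 ? 0x1b : 0);	/* p *= 3 */
	}
	log_tab[1] = 0;
}

/* Mirror of f_mult()/ff_mult(): add the logs, correct the 8-bit
 * wrap-around (256 = 255 + 1), special-case zero operands. */
static uint8_t gf_mult(uint8_t a, uint8_t b)
{
	uint8_t aa, cc;

	if (!a || !b)
		return 0;
	aa = log_tab[a];
	cc = aa + log_tab[b];
	return pow_tab[cc + (cc < aa ? 1 : 0)];
}

int main(void)
{
	build_tabs();
	/* Worked example from FIPS-197, section 4.2 */
	assert(gf_mult(0x57, 0x83) == 0xc1);
	printf("{57} x {83} = {%02x}\n", gf_mult(0x57, 0x83));
	return 0;
}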
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)

#define imix_col(y, x)			\
	u = star_x(x);			\
	v = star_x(u);			\
	w = star_x(v);			\
	t = w ^ (x);			\
	(y) = u ^ v ^ w;		\
	(y) ^= rotr(u ^ t, 8) ^		\
	       rotr(v ^ t, 16) ^	\
	       rotr(t, 24)

/* initialise the key schedule from the user supplied key */

#define loop4(i)					\
{							\
	t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i];	\
	t ^= E_KEY[4 * i];     E_KEY[4 * i + 4] = t;	\
	t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t;	\
	t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t;	\
	t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t;	\
}

#define loop6(i)					\
{							\
	t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i];	\
	t ^= E_KEY[6 * i];     E_KEY[6 * i + 6] = t;	\
	t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t;	\
	t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t;	\
	t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t;	\
	t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t;	\
	t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t;	\
}

#define loop8(i)					\
{							\
	t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i];	\
	t ^= E_KEY[8 * i];     E_KEY[8 * i + 8] = t;	\
	t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t;	\
	t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t;	\
	t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t;	\
	t  = E_KEY[8 * i + 4] ^ ls_box(t);		\
	E_KEY[8 * i + 12] = t;				\
	t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t;	\
	t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t;	\
	t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t;	\
}

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	uint32_t i, t, u, v, w;
	uint32_t P[AES_EXTENDED_KEY_SIZE];
	uint32_t rounds;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	ctx->key_length = key_len;

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	E_KEY[0] = le32_to_cpu(key[0]);
	E_KEY[1] = le32_to_cpu(key[1]);
	E_KEY[2] = le32_to_cpu(key[2]);
	E_KEY[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		return 0;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	switch (key_len) {
	case 16:
		t = E_KEY[3];
		for (i = 0; i < 10; ++i)
			loop4(i);
		break;

	case 24:
		E_KEY[4] = le32_to_cpu(key[4]);
		t = E_KEY[5] = le32_to_cpu(key[5]);
		for (i = 0; i < 8; ++i)
			loop6(i);
		break;

	case 32:
		E_KEY[4] = le32_to_cpu(key[4]);
		E_KEY[5] = le32_to_cpu(key[5]);
		E_KEY[6] = le32_to_cpu(key[6]);
		t = E_KEY[7] = le32_to_cpu(key[7]);
		for (i = 0; i < 7; ++i)
			loop8(i);
		break;
	}

	D_KEY[0] = E_KEY[0];
	D_KEY[1] = E_KEY[1];
	D_KEY[2] = E_KEY[2];
	D_KEY[3] = E_KEY[3];

	for (i = 4; i < key_len + 24; ++i) {
		imix_col(D_KEY[i], E_KEY[i]);
	}

	/* PadLock needs a different format of the decryption key. */
	rounds = 10 + (key_len - 16) / 4;

	for (i = 0; i < rounds; i++) {
		P[((i + 1) * 4) + 0] = D_KEY[((rounds - i - 1) * 4) + 0];
		P[((i + 1) * 4) + 1] = D_KEY[((rounds - i - 1) * 4) + 1];
		P[((i + 1) * 4) + 2] = D_KEY[((rounds - i - 1) * 4) + 2];
		P[((i + 1) * 4) + 3] = D_KEY[((rounds - i - 1) * 4) + 3];
	}

	P[0] = E_KEY[(rounds * 4) + 0];
	P[1] = E_KEY[(rounds * 4) + 1];
	P[2] = E_KEY[(rounds * 4) + 2];
	P[3] = E_KEY[(rounds * 4) + 3];

	memcpy(D_KEY, P, AES_EXTENDED_KEY_SIZE_B);

	return 0;
}
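The control-word arithmetic in aes_set_key() is compact: rounds = 10 + (key_len - 16) / 4 yields the standard 10/12/14 AES rounds for 16/24/32-byte keys, and ksize = (key_len - 16) / 8 packs the same choice into the 2-bit key-size field. A trivial standalone check of those two formulas (illustration only, not part of this commit):

#include <assert.h>

int main(void)
{
	int key_len[] = { 16, 24, 32 };
	int rounds[]  = { 10, 12, 14 };	/* AES-128/192/256 round counts */
	int ksize[]   = { 0, 1, 2 };	/* values of the 2-bit field */
	int i;

	for (i = 0; i < 3; i++) {
		assert(10 + (key_len[i] - 16) / 4 == rounds[i]);
		assert((key_len[i] - 16) / 8 == ksize[i]);
	}
	return 0;
}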
/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	asm volatile ("pushfl; popfl");		/* enforce key reload. */
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	/* Enforce key reload. */
	asm volatile ("pushfl; popfl");
	/* rep xcryptcbc */
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-padlock",
	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-padlock",
	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};

static int __init padlock_init(void)
{
	int ret;

	if (!cpu_has_xcrypt) {
		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	gen_tabs();
	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes-padlock");
drivers/crypto/padlock-sha.c | 318 lines (new file)
@@ -0,0 +1,318 @@
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include "padlock.h"

#define SHA1_DEFAULT_FALLBACK	"sha1-generic"
#define SHA1_DIGEST_SIZE	20
#define SHA1_HMAC_BLOCK_SIZE	64

#define SHA256_DEFAULT_FALLBACK	"sha256-generic"
#define SHA256_DIGEST_SIZE	32
#define SHA256_HMAC_BLOCK_SIZE	64

struct padlock_sha_ctx {
	char		*data;
	size_t		used;
	int		bypass;
	void		(*f_sha_padlock)(const char *in, char *out, int count);
	struct hash_desc fallback;
};

static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx(tfm);
}

/* We'll need an aligned address on the stack */
#define NEAREST_ALIGNED(ptr) \
	((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))

static struct crypto_alg sha1_alg, sha256_alg;

static void padlock_sha_bypass(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->bypass)
		return;

	crypto_hash_init(&ctx(tfm)->fallback);
	if (ctx(tfm)->data && ctx(tfm)->used) {
		struct scatterlist sg;

		sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
	}

	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 1;
}

static void padlock_sha_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 0;
}

static void padlock_sha_update(struct crypto_tfm *tfm,
			       const uint8_t *data, unsigned int length)
{
	/* Our buffer is always one page. */
	if (unlikely(!ctx(tfm)->bypass &&
		     (ctx(tfm)->used + length > PAGE_SIZE)))
		padlock_sha_bypass(tfm);

	if (unlikely(ctx(tfm)->bypass)) {
		struct scatterlist sg;
		sg_set_buf(&sg, (uint8_t *)data, length);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
		return;
	}

	memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
	ctx(tfm)->used += length;
}

static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

static void padlock_do_sha1(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 bytes!
	 * PadLock microcode needs it that big. */
	char buf[128 + 16];
	char *result = NEAREST_ALIGNED(buf);

	/* Standard SHA-1 initial hash values */
	((uint32_t *)result)[0] = 0x67452301;
	((uint32_t *)result)[1] = 0xEFCDAB89;
	((uint32_t *)result)[2] = 0x98BADCFE;
	((uint32_t *)result)[3] = 0x10325476;
	((uint32_t *)result)[4] = 0xC3D2E1F0;

	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"	/* rep xsha1 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}

static void padlock_do_sha256(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 bytes!
	 * PadLock microcode needs it that big. */
	char buf[128 + 16];
	char *result = NEAREST_ALIGNED(buf);

	/* Standard SHA-256 initial hash values */
	((uint32_t *)result)[0] = 0x6A09E667;
	((uint32_t *)result)[1] = 0xBB67AE85;
	((uint32_t *)result)[2] = 0x3C6EF372;
	((uint32_t *)result)[3] = 0xA54FF53A;
	((uint32_t *)result)[4] = 0x510E527F;
	((uint32_t *)result)[5] = 0x9B05688C;
	((uint32_t *)result)[6] = 0x1F83D9AB;
	((uint32_t *)result)[7] = 0x5BE0CD19;

	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"	/* rep xsha256 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}

static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
{
	if (unlikely(ctx(tfm)->bypass)) {
		crypto_hash_final(&ctx(tfm)->fallback, out);
		ctx(tfm)->bypass = 0;
		return;
	}

	/* Pass the input buffer to PadLock microcode... */
	ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);

	ctx(tfm)->used = 0;
}

static int padlock_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct crypto_hash *fallback_tfm;

	/* For now we'll allocate one page. This
	 * could eventually be configurable one day. */
	ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
	if (!ctx(tfm)->data)
		return -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
					 CRYPTO_ALG_ASYNC |
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		free_page((unsigned long)(ctx(tfm)->data));
		return PTR_ERR(fallback_tfm);
	}

	ctx(tfm)->fallback.tfm = fallback_tfm;
	return 0;
}

static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha1;

	return padlock_cra_init(tfm);
}

static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha256;

	return padlock_cra_init(tfm);
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->data) {
		free_page((unsigned long)(ctx(tfm)->data));
		ctx(tfm)->data = NULL;
	}

	crypto_free_hash(ctx(tfm)->fallback.tfm);
	ctx(tfm)->fallback.tfm = NULL;
}

static struct crypto_alg sha1_alg = {
	.cra_name		= "sha1",
	.cra_driver_name	= "sha1-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_DIGEST |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= SHA1_HMAC_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(sha1_alg.cra_list),
	.cra_init		= padlock_sha1_cra_init,
	.cra_exit		= padlock_cra_exit,
	.cra_u			= {
		.digest = {
			.dia_digestsize	= SHA1_DIGEST_SIZE,
			.dia_init	= padlock_sha_init,
			.dia_update	= padlock_sha_update,
			.dia_final	= padlock_sha_final,
		}
	}
};

static struct crypto_alg sha256_alg = {
	.cra_name		= "sha256",
	.cra_driver_name	= "sha256-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_DIGEST |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= SHA256_HMAC_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(sha256_alg.cra_list),
	.cra_init		= padlock_sha256_cra_init,
	.cra_exit		= padlock_cra_exit,
	.cra_u			= {
		.digest = {
			.dia_digestsize	= SHA256_DIGEST_SIZE,
			.dia_init	= padlock_sha_init,
			.dia_update	= padlock_sha_update,
			.dia_final	= padlock_sha_final,
		}
	}
};

static void __init padlock_sha_check_fallbacks(void)
{
	if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK))
		printk(KERN_WARNING PFX
		       "Couldn't load fallback module for sha1.\n");

	if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK))
		printk(KERN_WARNING PFX
		       "Couldn't load fallback module for sha256.\n");
}

static int __init padlock_init(void)
{
	int rc = -ENODEV;

	if (!cpu_has_phe) {
		printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	padlock_sha_check_fallbacks();

	rc = crypto_register_alg(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_alg(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_alg(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&sha1_alg);
	crypto_unregister_alg(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");
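Consumers reach these digests through the same-era hash interface that padlock-sha.c itself uses for its fallback path. A minimal sketch — a hypothetical helper, not part of this commit — that digests a buffer with whatever "sha1" implementation wins the priority race:

#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Digest a linear buffer with "sha1"; this resolves to sha1-padlock
 * when the driver above is registered, otherwise to sha1-generic. */
static int sha1_demo(const u8 *buf, unsigned int len, u8 *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return PTR_ERR(desc.tfm);
	desc.flags = 0;

	sg_init_one(&sg, (u8 *)buf, len);
	err = crypto_hash_digest(&desc, &sg, len, digest);

	crypto_free_hash(desc.tfm);
	return err;
}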
drivers/crypto/padlock.c | 58 lines (new file)
@@ -0,0 +1,58 @@
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include "padlock.h"

static int __init padlock_init(void)
{
	int success = 0;

	if (crypto_has_cipher("aes-padlock", 0, 0))
		success++;

	if (crypto_has_hash("sha1-padlock", 0, 0))
		success++;

	if (crypto_has_hash("sha256-padlock", 0, 0))
		success++;

	if (!success) {
		printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n");
		return -ENODEV;
	}

	printk(KERN_NOTICE PFX "%d drivers are available.\n", success);

	return 0;
}

static void __exit padlock_fini(void)
{
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("Load all configured PadLock algorithms.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
drivers/crypto/padlock.h | 23 lines (new file)
@@ -0,0 +1,23 @@
/*
 * Driver for VIA PadLock
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#ifndef _CRYPTO_PADLOCK_H
#define _CRYPTO_PADLOCK_H

#define PADLOCK_ALIGNMENT 16

#define PFX	"padlock: "

#define PADLOCK_CRA_PRIORITY		300
#define PADLOCK_COMPOSITE_PRIORITY	400

#endif	/* _CRYPTO_PADLOCK_H */