/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/pci.h>
12 #include <linux/pci_ids.h>
13 #include <linux/crypto.h>
14 #include <linux/spinlock.h>
15 #include <crypto/algapi.h>
16 #include <crypto/aes.h>
19 #include <asm/delay.h>
21 #include "geode-aes.h"
/* Static structures */

/* MMIO base of the AES engine; mapped in geode_aes_probe() via pci_iomap(). */
static void __iomem * _iobase;

/* Serializes access to the single AES hardware unit (see geode_aes_crypt()). */
static spinlock_t lock;
28 /* Write a 128 bit field (either a writable key or IV) */
30 _writefield(u32 offset
, void *value
)
33 for(i
= 0; i
< 4; i
++)
34 iowrite32(((u32
*) value
)[i
], _iobase
+ offset
+ (i
* 4));
37 /* Read a 128 bit field (either a writable key or IV) */
39 _readfield(u32 offset
, void *value
)
42 for(i
= 0; i
< 4; i
++)
43 ((u32
*) value
)[i
] = ioread32(_iobase
+ offset
+ (i
* 4));
47 do_crypt(void *src
, void *dst
, int len
, u32 flags
)
50 u32 counter
= AES_OP_TIMEOUT
;
52 iowrite32(virt_to_phys(src
), _iobase
+ AES_SOURCEA_REG
);
53 iowrite32(virt_to_phys(dst
), _iobase
+ AES_DSTA_REG
);
54 iowrite32(len
, _iobase
+ AES_LENA_REG
);
56 /* Start the operation */
57 iowrite32(AES_CTRL_START
| flags
, _iobase
+ AES_CTRLA_REG
);
60 status
= ioread32(_iobase
+ AES_INTR_REG
);
62 } while(!(status
& AES_INTRA_PENDING
) && --counter
);
65 iowrite32((status
& 0xFF) | AES_INTRA_PENDING
, _iobase
+ AES_INTR_REG
);
66 return counter
? 0 : 1;
70 geode_aes_crypt(struct geode_aes_op
*op
)
79 /* If the source and destination is the same, then
80 * we need to turn on the coherent flags, otherwise
81 * we don't need to worry
84 flags
|= (AES_CTRL_DCA
| AES_CTRL_SCA
);
86 if (op
->dir
== AES_DIR_ENCRYPT
)
87 flags
|= AES_CTRL_ENCRYPT
;
89 /* Start the critical section */
91 spin_lock_irqsave(&lock
, iflags
);
93 if (op
->mode
== AES_MODE_CBC
) {
94 flags
|= AES_CTRL_CBC
;
95 _writefield(AES_WRITEIV0_REG
, op
->iv
);
98 if (!(op
->flags
& AES_FLAGS_HIDDENKEY
)) {
99 flags
|= AES_CTRL_WRKEY
;
100 _writefield(AES_WRITEKEY0_REG
, op
->key
);
103 ret
= do_crypt(op
->src
, op
->dst
, op
->len
, flags
);
106 if (op
->mode
== AES_MODE_CBC
)
107 _readfield(AES_WRITEIV0_REG
, op
->iv
);
109 spin_unlock_irqrestore(&lock
, iflags
);
114 /* CRYPTO-API Functions */
116 static int geode_setkey_cip(struct crypto_tfm
*tfm
, const u8
*key
,
119 struct geode_aes_op
*op
= crypto_tfm_ctx(tfm
);
124 if (len
== AES_KEYSIZE_128
) {
125 memcpy(op
->key
, key
, len
);
129 if (len
!= AES_KEYSIZE_192
&& len
!= AES_KEYSIZE_256
) {
130 /* not supported at all */
131 tfm
->crt_flags
|= CRYPTO_TFM_RES_BAD_KEY_LEN
;
136 * The requested key size is not supported by HW, do a fallback
138 op
->fallback
.blk
->base
.crt_flags
&= ~CRYPTO_TFM_REQ_MASK
;
139 op
->fallback
.blk
->base
.crt_flags
|= (tfm
->crt_flags
& CRYPTO_TFM_REQ_MASK
);
141 ret
= crypto_cipher_setkey(op
->fallback
.cip
, key
, len
);
143 tfm
->crt_flags
&= ~CRYPTO_TFM_RES_MASK
;
144 tfm
->crt_flags
|= (op
->fallback
.blk
->base
.crt_flags
& CRYPTO_TFM_RES_MASK
);
149 static int geode_setkey_blk(struct crypto_tfm
*tfm
, const u8
*key
,
152 struct geode_aes_op
*op
= crypto_tfm_ctx(tfm
);
157 if (len
== AES_KEYSIZE_128
) {
158 memcpy(op
->key
, key
, len
);
162 if (len
!= AES_KEYSIZE_192
&& len
!= AES_KEYSIZE_256
) {
163 /* not supported at all */
164 tfm
->crt_flags
|= CRYPTO_TFM_RES_BAD_KEY_LEN
;
169 * The requested key size is not supported by HW, do a fallback
171 op
->fallback
.blk
->base
.crt_flags
&= ~CRYPTO_TFM_REQ_MASK
;
172 op
->fallback
.blk
->base
.crt_flags
|= (tfm
->crt_flags
& CRYPTO_TFM_REQ_MASK
);
174 ret
= crypto_blkcipher_setkey(op
->fallback
.blk
, key
, len
);
176 tfm
->crt_flags
&= ~CRYPTO_TFM_RES_MASK
;
177 tfm
->crt_flags
|= (op
->fallback
.blk
->base
.crt_flags
& CRYPTO_TFM_RES_MASK
);
182 static int fallback_blk_dec(struct blkcipher_desc
*desc
,
183 struct scatterlist
*dst
, struct scatterlist
*src
,
187 struct crypto_blkcipher
*tfm
;
188 struct geode_aes_op
*op
= crypto_blkcipher_ctx(desc
->tfm
);
191 desc
->tfm
= op
->fallback
.blk
;
193 ret
= crypto_blkcipher_decrypt(desc
, dst
, src
, nbytes
);
198 static int fallback_blk_enc(struct blkcipher_desc
*desc
,
199 struct scatterlist
*dst
, struct scatterlist
*src
,
203 struct crypto_blkcipher
*tfm
;
204 struct geode_aes_op
*op
= crypto_blkcipher_ctx(desc
->tfm
);
207 desc
->tfm
= op
->fallback
.blk
;
209 ret
= crypto_blkcipher_encrypt(desc
, dst
, src
, nbytes
);
216 geode_encrypt(struct crypto_tfm
*tfm
, u8
*out
, const u8
*in
)
218 struct geode_aes_op
*op
= crypto_tfm_ctx(tfm
);
220 if (unlikely(op
->keylen
!= AES_KEYSIZE_128
)) {
221 crypto_cipher_encrypt_one(op
->fallback
.cip
, out
, in
);
225 op
->src
= (void *) in
;
226 op
->dst
= (void *) out
;
227 op
->mode
= AES_MODE_ECB
;
229 op
->len
= AES_MIN_BLOCK_SIZE
;
230 op
->dir
= AES_DIR_ENCRYPT
;
237 geode_decrypt(struct crypto_tfm
*tfm
, u8
*out
, const u8
*in
)
239 struct geode_aes_op
*op
= crypto_tfm_ctx(tfm
);
241 if (unlikely(op
->keylen
!= AES_KEYSIZE_128
)) {
242 crypto_cipher_decrypt_one(op
->fallback
.cip
, out
, in
);
246 op
->src
= (void *) in
;
247 op
->dst
= (void *) out
;
248 op
->mode
= AES_MODE_ECB
;
250 op
->len
= AES_MIN_BLOCK_SIZE
;
251 op
->dir
= AES_DIR_DECRYPT
;
256 static int fallback_init_cip(struct crypto_tfm
*tfm
)
258 const char *name
= tfm
->__crt_alg
->cra_name
;
259 struct geode_aes_op
*op
= crypto_tfm_ctx(tfm
);
261 op
->fallback
.cip
= crypto_alloc_cipher(name
, 0,
262 CRYPTO_ALG_ASYNC
| CRYPTO_ALG_NEED_FALLBACK
);
264 if (IS_ERR(op
->fallback
.cip
)) {
265 printk(KERN_ERR
"Error allocating fallback algo %s\n", name
);
266 return PTR_ERR(op
->fallback
.blk
);
272 static void fallback_exit_cip(struct crypto_tfm
*tfm
)
274 struct geode_aes_op
*op
= crypto_tfm_ctx(tfm
);
276 crypto_free_cipher(op
->fallback
.cip
);
277 op
->fallback
.cip
= NULL
;
280 static struct crypto_alg geode_alg
= {
282 .cra_driver_name
= "geode-aes",
285 .cra_flags
= CRYPTO_ALG_TYPE_CIPHER
|
286 CRYPTO_ALG_NEED_FALLBACK
,
287 .cra_init
= fallback_init_cip
,
288 .cra_exit
= fallback_exit_cip
,
289 .cra_blocksize
= AES_MIN_BLOCK_SIZE
,
290 .cra_ctxsize
= sizeof(struct geode_aes_op
),
291 .cra_module
= THIS_MODULE
,
292 .cra_list
= LIST_HEAD_INIT(geode_alg
.cra_list
),
295 .cia_min_keysize
= AES_MIN_KEY_SIZE
,
296 .cia_max_keysize
= AES_MAX_KEY_SIZE
,
297 .cia_setkey
= geode_setkey_cip
,
298 .cia_encrypt
= geode_encrypt
,
299 .cia_decrypt
= geode_decrypt
305 geode_cbc_decrypt(struct blkcipher_desc
*desc
,
306 struct scatterlist
*dst
, struct scatterlist
*src
,
309 struct geode_aes_op
*op
= crypto_blkcipher_ctx(desc
->tfm
);
310 struct blkcipher_walk walk
;
313 if (unlikely(op
->keylen
!= AES_KEYSIZE_128
))
314 return fallback_blk_dec(desc
, dst
, src
, nbytes
);
316 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
317 err
= blkcipher_walk_virt(desc
, &walk
);
318 memcpy(op
->iv
, walk
.iv
, AES_IV_LENGTH
);
320 while((nbytes
= walk
.nbytes
)) {
321 op
->src
= walk
.src
.virt
.addr
,
322 op
->dst
= walk
.dst
.virt
.addr
;
323 op
->mode
= AES_MODE_CBC
;
324 op
->len
= nbytes
- (nbytes
% AES_MIN_BLOCK_SIZE
);
325 op
->dir
= AES_DIR_DECRYPT
;
327 ret
= geode_aes_crypt(op
);
330 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
333 memcpy(walk
.iv
, op
->iv
, AES_IV_LENGTH
);
338 geode_cbc_encrypt(struct blkcipher_desc
*desc
,
339 struct scatterlist
*dst
, struct scatterlist
*src
,
342 struct geode_aes_op
*op
= crypto_blkcipher_ctx(desc
->tfm
);
343 struct blkcipher_walk walk
;
346 if (unlikely(op
->keylen
!= AES_KEYSIZE_128
))
347 return fallback_blk_enc(desc
, dst
, src
, nbytes
);
349 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
350 err
= blkcipher_walk_virt(desc
, &walk
);
351 memcpy(op
->iv
, walk
.iv
, AES_IV_LENGTH
);
353 while((nbytes
= walk
.nbytes
)) {
354 op
->src
= walk
.src
.virt
.addr
,
355 op
->dst
= walk
.dst
.virt
.addr
;
356 op
->mode
= AES_MODE_CBC
;
357 op
->len
= nbytes
- (nbytes
% AES_MIN_BLOCK_SIZE
);
358 op
->dir
= AES_DIR_ENCRYPT
;
360 ret
= geode_aes_crypt(op
);
362 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
365 memcpy(walk
.iv
, op
->iv
, AES_IV_LENGTH
);
369 static int fallback_init_blk(struct crypto_tfm
*tfm
)
371 const char *name
= tfm
->__crt_alg
->cra_name
;
372 struct geode_aes_op
*op
= crypto_tfm_ctx(tfm
);
374 op
->fallback
.blk
= crypto_alloc_blkcipher(name
, 0,
375 CRYPTO_ALG_ASYNC
| CRYPTO_ALG_NEED_FALLBACK
);
377 if (IS_ERR(op
->fallback
.blk
)) {
378 printk(KERN_ERR
"Error allocating fallback algo %s\n", name
);
379 return PTR_ERR(op
->fallback
.blk
);
385 static void fallback_exit_blk(struct crypto_tfm
*tfm
)
387 struct geode_aes_op
*op
= crypto_tfm_ctx(tfm
);
389 crypto_free_blkcipher(op
->fallback
.blk
);
390 op
->fallback
.blk
= NULL
;
393 static struct crypto_alg geode_cbc_alg
= {
394 .cra_name
= "cbc(aes)",
395 .cra_driver_name
= "cbc-aes-geode",
397 .cra_flags
= CRYPTO_ALG_TYPE_BLKCIPHER
|
398 CRYPTO_ALG_NEED_FALLBACK
,
399 .cra_init
= fallback_init_blk
,
400 .cra_exit
= fallback_exit_blk
,
401 .cra_blocksize
= AES_MIN_BLOCK_SIZE
,
402 .cra_ctxsize
= sizeof(struct geode_aes_op
),
404 .cra_type
= &crypto_blkcipher_type
,
405 .cra_module
= THIS_MODULE
,
406 .cra_list
= LIST_HEAD_INIT(geode_cbc_alg
.cra_list
),
409 .min_keysize
= AES_MIN_KEY_SIZE
,
410 .max_keysize
= AES_MAX_KEY_SIZE
,
411 .setkey
= geode_setkey_blk
,
412 .encrypt
= geode_cbc_encrypt
,
413 .decrypt
= geode_cbc_decrypt
,
414 .ivsize
= AES_IV_LENGTH
,
420 geode_ecb_decrypt(struct blkcipher_desc
*desc
,
421 struct scatterlist
*dst
, struct scatterlist
*src
,
424 struct geode_aes_op
*op
= crypto_blkcipher_ctx(desc
->tfm
);
425 struct blkcipher_walk walk
;
428 if (unlikely(op
->keylen
!= AES_KEYSIZE_128
))
429 return fallback_blk_dec(desc
, dst
, src
, nbytes
);
431 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
432 err
= blkcipher_walk_virt(desc
, &walk
);
434 while((nbytes
= walk
.nbytes
)) {
435 op
->src
= walk
.src
.virt
.addr
,
436 op
->dst
= walk
.dst
.virt
.addr
;
437 op
->mode
= AES_MODE_ECB
;
438 op
->len
= nbytes
- (nbytes
% AES_MIN_BLOCK_SIZE
);
439 op
->dir
= AES_DIR_DECRYPT
;
441 ret
= geode_aes_crypt(op
);
443 err
= blkcipher_walk_done(desc
, &walk
, nbytes
);
450 geode_ecb_encrypt(struct blkcipher_desc
*desc
,
451 struct scatterlist
*dst
, struct scatterlist
*src
,
454 struct geode_aes_op
*op
= crypto_blkcipher_ctx(desc
->tfm
);
455 struct blkcipher_walk walk
;
458 if (unlikely(op
->keylen
!= AES_KEYSIZE_128
))
459 return fallback_blk_enc(desc
, dst
, src
, nbytes
);
461 blkcipher_walk_init(&walk
, dst
, src
, nbytes
);
462 err
= blkcipher_walk_virt(desc
, &walk
);
464 while((nbytes
= walk
.nbytes
)) {
465 op
->src
= walk
.src
.virt
.addr
,
466 op
->dst
= walk
.dst
.virt
.addr
;
467 op
->mode
= AES_MODE_ECB
;
468 op
->len
= nbytes
- (nbytes
% AES_MIN_BLOCK_SIZE
);
469 op
->dir
= AES_DIR_ENCRYPT
;
471 ret
= geode_aes_crypt(op
);
473 ret
= blkcipher_walk_done(desc
, &walk
, nbytes
);
479 static struct crypto_alg geode_ecb_alg
= {
480 .cra_name
= "ecb(aes)",
481 .cra_driver_name
= "ecb-aes-geode",
483 .cra_flags
= CRYPTO_ALG_TYPE_BLKCIPHER
|
484 CRYPTO_ALG_NEED_FALLBACK
,
485 .cra_init
= fallback_init_blk
,
486 .cra_exit
= fallback_exit_blk
,
487 .cra_blocksize
= AES_MIN_BLOCK_SIZE
,
488 .cra_ctxsize
= sizeof(struct geode_aes_op
),
490 .cra_type
= &crypto_blkcipher_type
,
491 .cra_module
= THIS_MODULE
,
492 .cra_list
= LIST_HEAD_INIT(geode_ecb_alg
.cra_list
),
495 .min_keysize
= AES_MIN_KEY_SIZE
,
496 .max_keysize
= AES_MAX_KEY_SIZE
,
497 .setkey
= geode_setkey_blk
,
498 .encrypt
= geode_ecb_encrypt
,
499 .decrypt
= geode_ecb_decrypt
,
505 geode_aes_remove(struct pci_dev
*dev
)
507 crypto_unregister_alg(&geode_alg
);
508 crypto_unregister_alg(&geode_ecb_alg
);
509 crypto_unregister_alg(&geode_cbc_alg
);
511 pci_iounmap(dev
, _iobase
);
514 pci_release_regions(dev
);
515 pci_disable_device(dev
);
520 geode_aes_probe(struct pci_dev
*dev
, const struct pci_device_id
*id
)
524 if ((ret
= pci_enable_device(dev
)))
527 if ((ret
= pci_request_regions(dev
, "geode-aes")))
530 _iobase
= pci_iomap(dev
, 0, 0);
532 if (_iobase
== NULL
) {
537 spin_lock_init(&lock
);
539 /* Clear any pending activity */
540 iowrite32(AES_INTR_PENDING
| AES_INTR_MASK
, _iobase
+ AES_INTR_REG
);
542 if ((ret
= crypto_register_alg(&geode_alg
)))
545 if ((ret
= crypto_register_alg(&geode_ecb_alg
)))
548 if ((ret
= crypto_register_alg(&geode_cbc_alg
)))
551 printk(KERN_NOTICE
"geode-aes: GEODE AES engine enabled.\n");
555 crypto_unregister_alg(&geode_ecb_alg
);
558 crypto_unregister_alg(&geode_alg
);
561 pci_iounmap(dev
, _iobase
);
564 pci_release_regions(dev
);
567 pci_disable_device(dev
);
569 printk(KERN_ERR
"geode-aes: GEODE AES initialization failed.\n");
573 static struct pci_device_id geode_aes_tbl
[] = {
574 { PCI_VENDOR_ID_AMD
, PCI_DEVICE_ID_AMD_LX_AES
, PCI_ANY_ID
, PCI_ANY_ID
} ,
578 MODULE_DEVICE_TABLE(pci
, geode_aes_tbl
);
580 static struct pci_driver geode_aes_driver
= {
581 .name
= "Geode LX AES",
582 .id_table
= geode_aes_tbl
,
583 .probe
= geode_aes_probe
,
584 .remove
= __devexit_p(geode_aes_remove
)
590 return pci_register_driver(&geode_aes_driver
);
596 pci_unregister_driver(&geode_aes_driver
);
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");

/* Hook module load/unload to PCI driver (un)registration. */
module_init(geode_aes_init);
module_exit(geode_aes_exit);