crypto: shash - Make descsize a run-time attribute
drivers/crypto/padlock-sha.c
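
The change this revision captures is that an shash descriptor's size (descsize) is an attribute of the allocated tfm, queried at run time with crypto_shash_descsize(), rather than a compile-time constant. A minimal sketch of the resulting allocation pattern, mirroring what padlock_cra_init() does below (the helper name alloc_fallback_desc is illustrative, not part of the driver):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical helper showing the run-time-sized shash_desc allocation. */
static struct shash_desc *alloc_fallback_desc(const char *alg)
{
	struct crypto_shash *fb;
	struct shash_desc *desc;

	fb = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fb))
		return ERR_CAST(fb);

	/* descsize is only known once the tfm exists, hence the kmalloc. */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(fb), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(fb);
		return ERR_PTR(-ENOMEM);
	}

	desc->tfm = fb;
	desc->flags = 0;
	return desc;
}
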
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"

struct padlock_sha_ctx {
	char *data;
	size_t used;
	int bypass;
	void (*f_sha_padlock)(const char *in, char *out, int count);
	struct shash_desc *fallback;
};

static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx(tfm);
}

/* We'll need aligned address on the stack */
#define NEAREST_ALIGNED(ptr) \
	((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))

static struct crypto_alg sha1_alg, sha256_alg;

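/*
 * Input is collected in a single page (ctx->data) and handed to the PadLock
 * microcode in one shot at ->final() time.  Once more than PAGE_SIZE bytes
 * have been buffered, the driver "bypasses" to the software shash fallback:
 * whatever has been buffered so far is replayed into the fallback and all
 * further updates are streamed to it directly.
 */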
static int padlock_sha_bypass(struct crypto_tfm *tfm)
{
	int err = 0;

	if (ctx(tfm)->bypass)
		goto out;

	err = crypto_shash_init(ctx(tfm)->fallback);
	if (err)
		goto out;

	if (ctx(tfm)->data && ctx(tfm)->used)
		err = crypto_shash_update(ctx(tfm)->fallback, ctx(tfm)->data,
					  ctx(tfm)->used);

	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 1;

out:
	return err;
}

static void padlock_sha_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 0;
}

static void padlock_sha_update(struct crypto_tfm *tfm,
			       const uint8_t *data, unsigned int length)
{
	int err;

	/* Our buffer is always one page. */
	if (unlikely(!ctx(tfm)->bypass &&
		     (ctx(tfm)->used + length > PAGE_SIZE))) {
		err = padlock_sha_bypass(tfm);
		BUG_ON(err);
	}

	if (unlikely(ctx(tfm)->bypass)) {
		err = crypto_shash_update(ctx(tfm)->fallback, data, length);
		BUG_ON(err);
		return;
	}

	memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
	ctx(tfm)->used += length;
}

static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

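/*
 * Both helpers below process the complete message in a single call: the
 * state words are seeded with the standard SHA initial values, the
 * "rep xsha1" / "rep xsha256" instructions (emitted as raw opcode bytes)
 * run with %esi pointing at the input, %edi at the aligned state buffer,
 * %ecx holding the byte count and %eax cleared, and the resulting state
 * words are byte-swapped by padlock_output_block() into the caller's
 * big-endian digest buffer.
 */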
static void padlock_do_sha1(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128+16];
	char *result = NEAREST_ALIGNED(buf);
	int ts_state;

	((uint32_t *)result)[0] = SHA1_H0;
	((uint32_t *)result)[1] = SHA1_H1;
	((uint32_t *)result)[2] = SHA1_H2;
	((uint32_t *)result)[3] = SHA1_H3;
	((uint32_t *)result)[4] = SHA1_H4;

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}

static void padlock_do_sha256(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128+16];
	char *result = NEAREST_ALIGNED(buf);
	int ts_state;

	((uint32_t *)result)[0] = SHA256_H0;
	((uint32_t *)result)[1] = SHA256_H1;
	((uint32_t *)result)[2] = SHA256_H2;
	((uint32_t *)result)[3] = SHA256_H3;
	((uint32_t *)result)[4] = SHA256_H4;
	((uint32_t *)result)[5] = SHA256_H5;
	((uint32_t *)result)[6] = SHA256_H6;
	((uint32_t *)result)[7] = SHA256_H7;

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}

static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
{
	int err;

	if (unlikely(ctx(tfm)->bypass)) {
		err = crypto_shash_final(ctx(tfm)->fallback, out);
		BUG_ON(err);
		ctx(tfm)->bypass = 0;
		return;
	}

	/* Pass the input buffer to PadLock microcode... */
	ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);

	ctx(tfm)->used = 0;
}

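/*
 * Per-tfm setup: allocate the one-page staging buffer and a software shash
 * fallback registered under the same cra_name.  Because the fallback's
 * descriptor size is a run-time attribute of the shash tfm, the
 * struct shash_desc (plus its private state) is kmalloc'ed using
 * crypto_shash_descsize() rather than being embedded in a fixed-size
 * structure.
 */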
static int padlock_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* For now we'll allocate one page. This
	 * could eventually be configurable one day. */
	ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
	if (!ctx(tfm)->data)
		goto out;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out_free_page;
	}

	ctx(tfm)->fallback = kmalloc(sizeof(struct shash_desc) +
				     crypto_shash_descsize(fallback_tfm),
				     GFP_KERNEL);
	if (!ctx(tfm)->fallback)
		goto out_free_tfm;

	ctx(tfm)->fallback->tfm = fallback_tfm;
	ctx(tfm)->fallback->flags = 0;
	return 0;

out_free_tfm:
	crypto_free_shash(fallback_tfm);
out_free_page:
	free_page((unsigned long)(ctx(tfm)->data));
out:
	return err;
}

static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha1;

	return padlock_cra_init(tfm);
}

static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha256;

	return padlock_cra_init(tfm);
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->data) {
		free_page((unsigned long)(ctx(tfm)->data));
		ctx(tfm)->data = NULL;
	}

	crypto_free_shash(ctx(tfm)->fallback->tfm);

	kzfree(ctx(tfm)->fallback);
}

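/*
 * Both algorithms register under the generic names "sha1"/"sha256" at
 * PADLOCK_CRA_PRIORITY, so the crypto core prefers them over lower-priority
 * software implementations of the same name.  CRYPTO_ALG_NEED_FALLBACK is
 * set here and used as the allocation mask in padlock_cra_init() above,
 * which keeps the fallback lookup from selecting an implementation that
 * itself requires a fallback.
 */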
static struct crypto_alg sha1_alg = {
	.cra_name = "sha1",
	.cra_driver_name = "sha1-padlock",
	.cra_priority = PADLOCK_CRA_PRIORITY,
	.cra_flags = CRYPTO_ALG_TYPE_DIGEST |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = SHA1_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct padlock_sha_ctx),
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(sha1_alg.cra_list),
	.cra_init = padlock_sha1_cra_init,
	.cra_exit = padlock_cra_exit,
	.cra_u = {
		.digest = {
			.dia_digestsize = SHA1_DIGEST_SIZE,
			.dia_init = padlock_sha_init,
			.dia_update = padlock_sha_update,
			.dia_final = padlock_sha_final,
		}
	}
};

static struct crypto_alg sha256_alg = {
	.cra_name = "sha256",
	.cra_driver_name = "sha256-padlock",
	.cra_priority = PADLOCK_CRA_PRIORITY,
	.cra_flags = CRYPTO_ALG_TYPE_DIGEST |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = SHA256_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct padlock_sha_ctx),
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(sha256_alg.cra_list),
	.cra_init = padlock_sha256_cra_init,
	.cra_exit = padlock_cra_exit,
	.cra_u = {
		.digest = {
			.dia_digestsize = SHA256_DIGEST_SIZE,
			.dia_init = padlock_sha_init,
			.dia_update = padlock_sha_update,
			.dia_final = padlock_sha_final,
		}
	}
};

static int __init padlock_init(void)
{
	int rc = -ENODEV;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	rc = crypto_register_alg(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_alg(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_alg(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&sha1_alg);
	crypto_unregister_alg(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");