drivers/crypto/padlock-sha.c
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

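/*
 * Usage sketch (illustrative only, not part of the original driver):
 * callers reach this engine transparently through the generic shash API
 * by asking for "sha1" or "sha256"; the higher cra_priority below makes
 * the crypto core prefer the PadLock implementation when the hardware is
 * present.  Here data/len/digest are the caller's buffers:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
 *	struct shash_desc *desc;
 *	int err;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
 *		       GFP_KERNEL);
 *	if (!desc) {
 *		crypto_free_shash(tfm);
 *		return -ENOMEM;
 *	}
 *	desc->tfm = tfm;
 *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	err = crypto_shash_digest(desc, data, len, digest);
 *	kfree(desc);
 *	crypto_free_shash(tfm);
 */
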
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"

#ifdef CONFIG_64BIT
#define STACK_ALIGN 16
#else
#define STACK_ALIGN 4
#endif

struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};

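/*
 * The PadLock XSHA1/XSHA256 instructions finalize the hash (including
 * padding) in one go, so incremental hashing has to go through a
 * software fallback.  Init/update/export/import simply drive the
 * fallback; the hardware is used only in the finup/final paths, where
 * it finishes the digest from the fallback's exported state.
 */
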
static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_import(&dctx->fallback, in);
}

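/*
 * Copy the digest words out of the hardware result buffer, converting
 * each 32-bit word from the engine's native little-endian order to the
 * big-endian byte order expected of a SHA-1/SHA-256 digest.
 */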
static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/*
	 * We can't store directly to *out as it may be unaligned.
	 * Note: don't reduce the buffer size below 128 bytes; the
	 * PadLock microcode needs it that big.
	 */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

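	/*
	 * The XSHA byte counts are passed in machine-word registers.  If
	 * the total message length would not fit in an unsigned long
	 * (possible on 32-bit, where state.count is u64), let the
	 * software fallback finish the hash instead.
	 */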
	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

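	/*
	 * The hardware can resume from an exported state only at a block
	 * boundary.  If the fallback holds a partial block, either flush
	 * it through the fallback (when there is enough new data to fill
	 * it) or fold it into the trailing bytes and round state.count
	 * down to the preceding block boundary.
	 */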
	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/*
	 * We can't store directly to *out as it may be unaligned.
	 * Note: don't reduce the buffer size below 128 bytes; the
	 * PadLock microcode needs it that big.
	 */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

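	/* Same block-boundary fixup as in padlock_sha1_finup() above. */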
	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it fails. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

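	/*
	 * Keep the fallback tfm and grow our descriptor size so that the
	 * fallback's state can live inline after struct padlock_sha_desc
	 * (whose only member is the struct shash_desc header).
	 */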
	ctx->fallback = fallback_tfm;
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= padlock_sha_init,
	.update		= padlock_sha_update,
	.finup		= padlock_sha1_finup,
	.final		= padlock_sha1_final,
	.export		= padlock_sha_export,
	.import		= padlock_sha_import,
	.descsize	= sizeof(struct padlock_sha_desc),
	.statesize	= sizeof(struct sha1_state),
	.base		= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sha1-padlock",
		.cra_priority		= PADLOCK_CRA_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
		.cra_module		= THIS_MODULE,
		.cra_init		= padlock_cra_init,
		.cra_exit		= padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize	= SHA256_DIGEST_SIZE,
	.init		= padlock_sha_init,
	.update		= padlock_sha_update,
	.finup		= padlock_sha256_finup,
	.final		= padlock_sha256_final,
	.export		= padlock_sha_export,
	.import		= padlock_sha_import,
	.descsize	= sizeof(struct padlock_sha_desc),
	.statesize	= sizeof(struct sha256_state),
	.base		= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sha256-padlock",
		.cra_priority		= PADLOCK_CRA_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
		.cra_module		= THIS_MODULE,
		.cra_init		= padlock_cra_init,
		.cra_exit		= padlock_cra_exit,
	}
};

static int __init padlock_init(void)
{
	int rc = -ENODEV;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	rc = crypto_register_shash(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_shash(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_shash(&sha1_alg);
	crypto_unregister_shash(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");