/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
struct cword {
        unsigned int __attribute__ ((__packed__))
                rounds:4,
                algo:3,
                keygen:1,
                interm:1,
                encdec:1,
                ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

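/*
 * Field meanings as used by aes_set_key() below: 'rounds' is the AES
 * round count, 'encdec' selects decryption when set, 'ksize' encodes
 * the key length as (key_len - 16) / 8 (0/1/2 for 128/192/256-bit
 * keys), and 'keygen' is set when software supplies a pre-expanded
 * key schedule.  For example, a 256-bit key gives
 * rounds = 10 + (32 - 16) / 4 = 14 and ksize = 2; 'algo' and 'interm'
 * are left at zero by this driver.
 */
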
/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries and
 * that the hardware can access 16 * 16 bytes of E and
 * d_data (only the first 15 * 16 bytes matter, but the
 * HW reads more).
 */
struct aes_ctx {
        u32 E[AES_MAX_KEYLENGTH_U32]
                __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
        u32 d_data[AES_MAX_KEYLENGTH_U32]
                __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
        struct {
                struct cword encrypt;
                struct cword decrypt;
        } cword;
        u32 *D;
};

static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
        /* TODO: We should check the actual CPU model/stepping
                 as it's possible that the capability will be
                 added in the next CPU revisions. */
        if (key_len == 16)
                return 1;
        return 0;
}

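/*
 * The crypto core only guarantees crypto_tfm_ctx_alignment() for a
 * tfm context.  When PADLOCK_ALIGNMENT is stricter than that, the
 * pointer is rounded up by hand; cra_alignmask below should make the
 * core reserve enough slack for this.
 */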
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
        unsigned long addr = (unsigned long)ctx;
        unsigned long align = PADLOCK_ALIGNMENT;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
        return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
        return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct aes_ctx *ctx = aes_ctx(tfm);
        const __le32 *key = (const __le32 *)in_key;
        u32 *flags = &tfm->crt_flags;
        struct crypto_aes_ctx gen_aes;
        int cpu;

        if (key_len % 8) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /*
         * If the hardware is capable of generating the extended key
         * itself we must supply the plain key for both encryption
         * and decryption.
         */
        ctx->D = ctx->E;

        ctx->E[0] = le32_to_cpu(key[0]);
        ctx->E[1] = le32_to_cpu(key[1]);
        ctx->E[2] = le32_to_cpu(key[2]);
        ctx->E[3] = le32_to_cpu(key[3]);

        /* Prepare control words. */
        memset(&ctx->cword, 0, sizeof(ctx->cword));

        ctx->cword.decrypt.encdec = 1;
        ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
        ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
        ctx->cword.encrypt.ksize = (key_len - 16) / 8;
        ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

        /* Don't generate extended keys if the hardware can do it. */
        if (aes_hw_extkey_available(key_len))
                goto ok;

        ctx->D = ctx->d_data;
        ctx->cword.encrypt.keygen = 1;
        ctx->cword.decrypt.keygen = 1;

        if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
        memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
        for_each_online_cpu(cpu)
                if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
                    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
                        per_cpu(paes_last_cword, cpu) = NULL;

        return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
        int cpu = raw_smp_processor_id();

        if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
                asm volatile ("pushfl; popfl");
#else
                asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
        per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'.  These instructions
 * should only be used inside the irq_ts_save/restore() context.
 */

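/*
 * The xcrypt instructions use an implicit register convention, visible
 * in the constraints below: ESI = source, EDI = destination, EDX =
 * control word, EBX = key and ECX = block count (plus EAX = IV for the
 * CBC variant).  They are emitted as raw opcode bytes, presumably so
 * that assemblers without PadLock support can still build this file.
 */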
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                                  struct cword *control_word, int count)
{
        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
                      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                                 u8 *iv, struct cword *control_word, int count)
{
        asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                      : "+S" (input), "+D" (output), "+a" (iv)
                      : "d" (control_word), "b" (key), "c" (count));
        return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
                           struct cword *cword, int count)
{
        /*
         * Padlock prefetches extra data so we must provide mapped input buffers.
         * Assume there are at least 16 bytes of stack already in use.
         */
        u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
        u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

        memcpy(tmp, in, count * AES_BLOCK_SIZE);
        rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
                          u8 *iv, struct cword *cword, int count)
{
        /*
         * Padlock prefetches extra data so we must provide mapped input buffers.
         * Assume there are at least 16 bytes of stack already in use.
         */
        u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
        u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

        memcpy(tmp, in, count * AES_BLOCK_SIZE);
        return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

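/*
 * The bounce buffers above hold MAX_*_FETCH_BLOCKS - 1 blocks plus
 * alignment slack.  That is enough because the copy paths are only
 * reached with count < ecb/cbc_fetch_blocks (see the callers below),
 * so any overfetch past the copied blocks lands in mapped stack
 * rather than in a possibly unmapped following page.
 */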
static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
                             struct cword *cword, int count)
{
        /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
         * We could avoid some copying here but it's probably not worth it.
         */
        if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
                ecb_crypt_copy(in, out, key, cword, count);
                return;
        }

        rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
                            u8 *iv, struct cword *cword, int count)
{
        /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
        if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
                return cbc_crypt_copy(in, out, key, iv, cword, count);

        return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                                      void *control_word, u32 count)
{
        u32 initial = count & (ecb_fetch_blocks - 1);

        if (count < ecb_fetch_blocks) {
                ecb_crypt(input, output, key, control_word, count);
                return;
        }

        if (initial)
                asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                              : "+S"(input), "+D"(output)
                              : "d"(control_word), "b"(key), "c"(initial));

        asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"       /* rep xcryptecb */
                      : "+S"(input), "+D"(output)
                      : "d"(control_word), "b"(key), "c"(count - initial));
}

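/*
 * Above, 'initial' is count modulo ecb_fetch_blocks (the fetch counts
 * are powers of two, so the mask works).  The odd tail is crypted
 * first, while at least ecb_fetch_blocks of valid data still follow
 * it; the remaining count - initial blocks are then a whole multiple
 * of the fetch size, so neither xcrypt fetches past the end of the
 * input.
 */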
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                                     u8 *iv, void *control_word, u32 count)
{
        u32 initial = count & (cbc_fetch_blocks - 1);

        if (count < cbc_fetch_blocks)
                return cbc_crypt(input, output, key, iv, control_word, count);

        if (initial)
                asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                              : "+S" (input), "+D" (output), "+a" (iv)
                              : "d" (control_word), "b" (key), "c" (initial));

        asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
                      : "+S" (input), "+D" (output), "+a" (iv)
                      : "d" (control_word), "b" (key), "c" (count - initial));
        return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct aes_ctx *ctx = aes_ctx(tfm);
        int ts_state;

        padlock_reset_key(&ctx->cword.encrypt);
        ts_state = irq_ts_save();
        ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
        irq_ts_restore(ts_state);
        padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct aes_ctx *ctx = aes_ctx(tfm);
        int ts_state;

        padlock_reset_key(&ctx->cword.encrypt);
        ts_state = irq_ts_save();
        ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
        irq_ts_restore(ts_state);
        padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-padlock",
        .cra_priority           = PADLOCK_CRA_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aes_ctx),
        .cra_alignmask          = PADLOCK_ALIGNMENT - 1,
        .cra_module             = THIS_MODULE,
        .cra_u                  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt,
                }
        }
};

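/*
 * PADLOCK_CRA_PRIORITY is well above the generic software AES priority,
 * so once this module is loaded, requests for "aes" (and for the
 * "ecb(aes)"/"cbc(aes)" templates below, via PADLOCK_COMPOSITE_PRIORITY)
 * resolve to the PadLock implementations by default.
 */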
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key(&ctx->cword.encrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->E, &ctx->cword.encrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);

        padlock_store_cword(&ctx->cword.encrypt);

        return err;
}

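/*
 * The walk pattern above is shared by all four blkcipher handlers:
 * blkcipher_walk_virt() maps each scatterlist chunk into virtual
 * memory, the chunk minus any partial trailing block is crypted in
 * one go, and the number of leftover bytes is handed back through
 * blkcipher_walk_done(), which sets up the next chunk.
 */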
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key(&ctx->cword.decrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->D, &ctx->cword.decrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);

        padlock_store_cword(&ctx->cword.encrypt);

        return err;
}

static struct crypto_alg ecb_aes_alg = {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-padlock",
        .cra_priority           = PADLOCK_COMPOSITE_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aes_ctx),
        .cra_alignmask          = PADLOCK_ALIGNMENT - 1,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize            = AES_MIN_KEY_SIZE,
                        .max_keysize            = AES_MAX_KEY_SIZE,
                        .setkey                 = aes_set_key,
                        .encrypt                = ecb_aes_encrypt,
                        .decrypt                = ecb_aes_decrypt,
                }
        }
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key(&ctx->cword.encrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
                                            walk.dst.virt.addr, ctx->E,
                                            walk.iv, &ctx->cword.encrypt,
                                            nbytes / AES_BLOCK_SIZE);
                memcpy(walk.iv, iv, AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        irq_ts_restore(ts_state);

        padlock_store_cword(&ctx->cword.decrypt);

        return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err;
        int ts_state;

        padlock_reset_key(&ctx->cword.encrypt);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        ts_state = irq_ts_save();
        while ((nbytes = walk.nbytes)) {
                padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
                                   ctx->D, walk.iv, &ctx->cword.decrypt,
                                   nbytes / AES_BLOCK_SIZE);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        irq_ts_restore(ts_state);

        padlock_store_cword(&ctx->cword.encrypt);

        return err;
}

static struct crypto_alg cbc_aes_alg = {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-padlock",
        .cra_priority           = PADLOCK_COMPOSITE_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aes_ctx),
        .cra_alignmask          = PADLOCK_ALIGNMENT - 1,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize            = AES_MIN_KEY_SIZE,
                        .max_keysize            = AES_MAX_KEY_SIZE,
                        .ivsize                 = AES_BLOCK_SIZE,
                        .setkey                 = aes_set_key,
                        .encrypt                = cbc_aes_encrypt,
                        .decrypt                = cbc_aes_decrypt,
                }
        }
};

static struct x86_cpu_id padlock_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);

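/*
 * The x86cpu device table lets userspace autoload this module on CPUs
 * that advertise the XCRYPT feature bit.
 */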
static int __init padlock_init(void)
{
        int ret;
        struct cpuinfo_x86 *c = &cpu_data(0);

        if (!x86_match_cpu(padlock_cpu_id))
                return -ENODEV;

        if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }

        if ((ret = crypto_register_alg(&aes_alg)))
                goto aes_err;

        if ((ret = crypto_register_alg(&ecb_aes_alg)))
                goto ecb_aes_err;

        if ((ret = crypto_register_alg(&cbc_aes_alg)))
                goto cbc_aes_err;

        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

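        /*
         * VIA Nano stepping 2 suffers from the prefetch erratum noted
         * at the top of this file: the engine fetches more blocks than
         * requested.  Raising the fetch counts makes the chunking
         * logic above keep that overfetch within mapped data.
         */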
        if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
                ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
                cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
                printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
        }

out:
        return ret;

cbc_aes_err:
        crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
        crypto_unregister_alg(&aes_alg);
aes_err:
        printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
        goto out;
}

static void __exit padlock_fini(void)
{
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("aes");