/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

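/* Algorithm/driver name pair used when registering with the crypto API */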
struct ccp_aes_xts_def {
	const char *name;
	const char *drv_name;
};

static struct ccp_aes_xts_def aes_xts_algs[] = {
	{
		.name = "xts(aes)",
		.drv_name = "xts-aes-ccp",
	},
};

struct ccp_unit_size_map {
	unsigned int size;
	u32 value;
};

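/*
 * Unit sizes are matched from largest to smallest against the request
 * length.  Entries that map to CCP_XTS_AES_UNIT_SIZE__LAST are sizes
 * the CCP cannot handle natively; requests that land on one of those
 * are routed to the software fallback in ccp_aes_xts_crypt().
 */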
static struct ccp_unit_size_map unit_size_map[] = {
	{
		.size = 4096,
		.value = CCP_XTS_AES_UNIT_SIZE_4096,
	},
	{
		.size = 2048,
		.value = CCP_XTS_AES_UNIT_SIZE_2048,
	},
	{
		.size = 1024,
		.value = CCP_XTS_AES_UNIT_SIZE_1024,
	},
	{
		.size = 512,
		.value = CCP_XTS_AES_UNIT_SIZE_512,
	},
	{
		.size = 256,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 128,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 64,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 32,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size = 16,
		.value = CCP_XTS_AES_UNIT_SIZE_16,
	},
	{
		.size = 1,
		.value = CCP_XTS_AES_UNIT_SIZE__LAST,
	},
};

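/* Completion callback: propagate the updated IV back to the request */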
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

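/*
 * Stash the key for the CCP when it is a size the hardware supports.
 * The key is always handed to the fallback tfm as well, so the fallback
 * is ready if the request must be handed off later in the crypt path.
 */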
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));

	/* Only support a 128-bit AES key with a 128-bit tweak key,
	 * otherwise use the fallback
	 */
	switch (key_len) {
	case AES_KEYSIZE_128 * 2:
		memcpy(ctx->u.aes.key, key, key_len);
		break;
	}
	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
					key_len);
}

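/*
 * Common encrypt/decrypt path.  Validate the request, pick the largest
 * unit size that evenly divides the request length, then either queue
 * the command to the CCP or, for key/unit sizes the hardware cannot
 * handle, bounce the request to the software fallback tfm.
 */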
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
			     unsigned int encrypt)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	unsigned int unit;
	int ret;

	if (!ctx->u.aes.key_len) {
		pr_err("AES key not set\n");
		return -EINVAL;
	}

	if (req->nbytes & (AES_BLOCK_SIZE - 1)) {
		pr_err("AES request size is not a multiple of the block size\n");
		return -EINVAL;
	}

	if (!req->info) {
		pr_err("AES IV not supplied\n");
		return -EINVAL;
	}

	for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
		if (!(req->nbytes & (unit_size_map[unit].size - 1)))
			break;

	if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
		ret = encrypt ? crypto_ablkcipher_encrypt(req) :
				crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

		return ret;
	}

	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
	rctx->cmd.u.xts.action = encrypt ? CCP_AES_ACTION_ENCRYPT
					 : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.xts.iv = &rctx->iv_sg;
	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.xts.src = req->src;
	rctx->cmd.u.xts.src_len = req->nbytes;
	rctx->cmd.u.xts.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 1);
}

static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 0);
}

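/*
 * Allocate the software fallback cipher at tfm init time and size the
 * request context to hold both this driver's context and the
 * fallback's request data.
 */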
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;

	fallback_tfm = crypto_alloc_ablkcipher(tfm->__crt_alg->cra_name, 0,
					       CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("could not load fallback driver %s\n",
			tfm->__crt_alg->cra_name);
		return PTR_ERR(fallback_tfm);
	}
	ctx->u.aes.tfm_ablkcipher = fallback_tfm;

	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) +
				      fallback_tfm->base.crt_ablkcipher.reqsize;

	return 0;
}

static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.aes.tfm_ablkcipher)
		crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
	ctx->u.aes.tfm_ablkcipher = NULL;
}

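/*
 * Register a single XTS algorithm with the kernel crypto API and add it
 * to the driver's list of registered algorithms.  Once registered, a
 * caller can request the cipher by name, e.g. (a minimal sketch using
 * the old ablkcipher interface this driver targets):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("xts(aes)", 0, 0);
 */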
static int ccp_register_aes_xts_alg(struct list_head *head,
				    const struct ccp_aes_xts_def *def)
{
	struct ccp_crypto_ablkcipher_alg *ccp_alg;
	struct crypto_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_KERN_DRIVER_ONLY |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_blocksize = AES_BLOCK_SIZE;
	alg->cra_ctxsize = sizeof(struct ccp_ctx);
	alg->cra_priority = CCP_CRA_PRIORITY;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
	alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
	alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
	alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
	alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
	alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
	alg->cra_init = ccp_aes_xts_cra_init;
	alg->cra_exit = ccp_aes_xts_cra_exit;
	alg->cra_module = THIS_MODULE;

	ret = crypto_register_alg(alg);
	if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
		       alg->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

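/* Register every XTS(AES) algorithm defined in aes_xts_algs */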
int ccp_register_aes_xts_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}