1 | /** |
2 | * Copyright (C) ST-Ericsson SA 2010 | |
3 | * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson. | |
4 | * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson. | |
5 | * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson. | |
6 | * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson. | |
7 | * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson. | |
8 | * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson. | |
9 | * License terms: GNU General Public License (GPL) version 2 | |
10 | */ | |
11 | ||
12 | #include <linux/clk.h> | |
13 | #include <linux/completion.h> | |
14 | #include <linux/crypto.h> | |
15 | #include <linux/dmaengine.h> | |
16 | #include <linux/err.h> | |
17 | #include <linux/errno.h> | |
18 | #include <linux/interrupt.h> | |
19 | #include <linux/io.h> | |
20 | #include <linux/irqreturn.h> | |
21 | #include <linux/klist.h> | |
22 | #include <linux/module.h> | |
23 | #include <linux/platform_device.h> | |
24 | #include <linux/regulator/consumer.h> | |
25 | #include <linux/semaphore.h> | |
26 | ||
27 | #include <crypto/aes.h> | |
28 | #include <crypto/algapi.h> | |
29 | #include <crypto/ctr.h> | |
30 | #include <crypto/des.h> | |
31 | #include <crypto/scatterwalk.h> | |
32 | ||
33 | #include <plat/ste_dma40.h> | |
34 | ||
35 | #include <mach/crypto-ux500.h> | |
36 | #include <mach/hardware.h> | |
37 | ||
38 | #include "cryp_p.h" | |
39 | #include "cryp.h" | |
40 | ||
41 | #define CRYP_MAX_KEY_SIZE 32 | |
42 | #define BYTES_PER_WORD 4 | |
43 | ||
44 | static int cryp_mode; | |
45 | static atomic_t session_id; | |
46 | ||
47 | static struct stedma40_chan_cfg *mem_to_engine; | |
48 | static struct stedma40_chan_cfg *engine_to_mem; | |
49 | ||
50 | /** | |
51 | * struct cryp_driver_data - data specific to the driver. | |
52 | * | |
53 | * @device_list: A list of registered devices to choose from. | |
54 | * @device_allocation: A semaphore initialized with number of devices. | |
55 | */ | |
56 | struct cryp_driver_data { | |
57 | struct klist device_list; | |
58 | struct semaphore device_allocation; | |
59 | }; | |
60 | ||
61 | /** | |
62 | * struct cryp_ctx - Crypto context | |
63 | * @config: Crypto mode. | |
64 | * @key[CRYP_MAX_KEY_SIZE]: Key. | |
65 | * @keylen: Length of key. | |
66 | * @iv: Pointer to initialization vector. | |
67 | * @indata: Pointer to indata. | |
68 | * @outdata: Pointer to outdata. | |
69 | * @datalen: Length of indata. | |
70 | * @outlen: Length of outdata. | |
71 | * @blocksize: Size of blocks. | |
72 | * @updated: Updated flag. | |
73 | * @dev_ctx: Device dependent context. | |
74 | * @device: Pointer to the device. | |
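* @session_id: Crypto session id, compared against the global session counter to detect when the hardware context must be restored.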
75 | */ | |
76 | struct cryp_ctx { | |
77 | struct cryp_config config; | |
78 | u8 key[CRYP_MAX_KEY_SIZE]; | |
79 | u32 keylen; | |
80 | u8 *iv; | |
81 | const u8 *indata; | |
82 | u8 *outdata; | |
83 | u32 datalen; | |
84 | u32 outlen; | |
85 | u32 blocksize; | |
86 | u8 updated; | |
87 | struct cryp_device_context dev_ctx; | |
88 | struct cryp_device_data *device; | |
89 | u32 session_id; | |
90 | }; | |
91 | ||
92 | static struct cryp_driver_data driver_data; | |
93 | ||
94 | /** | |
95 | * uint8p_to_uint32_be - 4*uint8 to uint32 big endian | |
96 | * @in: Data to convert. | |
97 | */ | |
98 | static inline u32 uint8p_to_uint32_be(u8 *in) | |
99 | { | |
100 | u32 *data = (u32 *)in; | |
101 | ||
102 | return cpu_to_be32p(data); | |
103 | } | |
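/*
 * Example: with in[] = { 0x12, 0x34, 0x56, 0x78 } this returns 0x12345678,
 * i.e. the four bytes read as one big-endian word (cpu_to_be32p() performs
 * the byte swap on a little-endian core such as the ux500's ARM).
 */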
104 | ||
105 | /** | |
106 | * swap_bits_in_byte - mirror the bits in a byte | |
107 | * @b: the byte to be mirrored | |
108 | * | |
109 | * The bits are swapped the following way: | |
110 | * Byte b include bits 0-7, nibble 1 (n1) include bits 0-3 and | |
111 | * nibble 2 (n2) bits 4-7. | |
112 | * | |
113 | * Nibble 1 (n1): | |
114 | * (The "old" (moved) bit is replaced with a zero) | |
115 | * 1. Move bit 6 and 7, 4 positions to the left. | |
116 | * 2. Move bit 3 and 5, 2 positions to the left. | |
117 | * 3. Move bit 1-4, 1 position to the left. | |
118 | * | |
119 | * Nibble 2 (n2): | |
120 | * 1. Move bit 0 and 1, 4 positions to the right. | |
121 | * 2. Move bit 2 and 4, 2 positions to the right. | |
122 | * 3. Move bit 3-6, 1 position to the right. | |
123 | * | |
124 | * Combine the two nibbles to a complete and swapped byte. | |
125 | */ | |
126 | ||
127 | static inline u8 swap_bits_in_byte(u8 b) | |
128 | { | |
129 | #define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */ | |
130 | #define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5, | |
131 | right shift 2 */ | |
132 | #define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4, | |
133 | right shift 1 */ | |
134 | #define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */ | |
135 | #define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4, | |
136 | left shift 2 */ | |
137 | #define L_SHIFT_1_MASK 0x78 /* (After left shift 2) Bits 3-6, |
138 | left shift 1 */ | |
139 | ||
140 | u8 n1; | |
141 | u8 n2; | |
142 | ||
143 | /* Swap most significant nibble */ | |
144 | /* Right shift 4, bits 6 and 7 */ | |
145 | n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4)); | |
146 | /* Right shift 2, bits 3 and 5 */ | |
147 | n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2)); | |
148 | /* Right shift 1, bits 1-4 */ | |
149 | n1 = (n1 & R_SHIFT_1_MASK) >> 1; | |
150 | ||
151 | /* Swap least significant nibble */ | |
152 | /* Left shift 4, bits 0 and 1 */ | |
153 | n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4)); | |
154 | /* Left shift 2, bits 2 and 4 */ | |
155 | n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2)); | |
156 | /* Left shift 1, bits 3-6 */ | |
157 | n2 = (n2 & L_SHIFT_1_MASK) << 1; | |
158 | ||
159 | return n1 | n2; | |
160 | } | |
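/*
 * Worked example: swap_bits_in_byte(0xb2) == 0x4d, i.e. 1011 0010 is
 * mirrored into 0100 1101 (bit 0 trades places with bit 7, bit 1 with
 * bit 6, and so on).
 */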
161 | ||
162 | static inline void swap_words_in_key_and_bits_in_byte(const u8 *in, | |
163 | u8 *out, u32 len) | |
164 | { | |
165 | unsigned int i = 0; | |
166 | int j; | |
167 | int index = 0; | |
168 | ||
169 | j = len - BYTES_PER_WORD; | |
170 | while (j >= 0) { | |
171 | for (i = 0; i < BYTES_PER_WORD; i++) { | |
172 | index = len - j - BYTES_PER_WORD + i; | |
173 | out[j + i] = | |
174 | swap_bits_in_byte(in[index]); | |
175 | } | |
176 | j -= BYTES_PER_WORD; | |
177 | } | |
178 | } | |
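/*
 * E.g. for an 8-byte key k0..k7 the output is k4' k5' k6' k7' k0' k1' k2' k3',
 * where kN' = swap_bits_in_byte(kN): the 32-bit words are emitted in reverse
 * order while the byte order inside each word is preserved.
 */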
179 | ||
180 | static void add_session_id(struct cryp_ctx *ctx) | |
181 | { | |
182 | /* | |
183 | * We never want 0 to be a valid value, since this is the default value | |
184 | * for the software context. | |
185 | */ | |
186 | if (unlikely(atomic_inc_and_test(&session_id))) | |
187 | atomic_inc(&session_id); | |
188 | ||
189 | ctx->session_id = atomic_read(&session_id); | |
190 | } | |
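/*
 * The id stored here is compared against the global session_id counter in
 * cryp_setup_context(); a mismatch means another context has used the
 * hardware in between, so the saved device context must be restored.
 */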
191 | ||
192 | static irqreturn_t cryp_interrupt_handler(int irq, void *param) | |
193 | { | |
194 | struct cryp_ctx *ctx; | |
195 | int i; | |
196 | struct cryp_device_data *device_data; | |
197 | ||
198 | if (param == NULL) { | |
199 | BUG_ON(!param); | |
200 | return IRQ_HANDLED; | |
201 | } | |
202 | ||
203 | /* The device is coming from the one found in hw_crypt_noxts. */ | |
204 | device_data = (struct cryp_device_data *)param; | |
205 | ||
206 | ctx = device_data->current_ctx; | |
207 | ||
208 | if (ctx == NULL) { | |
209 | BUG_ON(!ctx); | |
210 | return IRQ_HANDLED; | |
211 | } | |
212 | ||
213 | dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen, | |
214 | cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ? | |
215 | "out" : "in"); | |
216 | ||
217 | if (cryp_pending_irq_src(device_data, | |
218 | CRYP_IRQ_SRC_OUTPUT_FIFO)) { | |
219 | if (ctx->outlen / ctx->blocksize > 0) { | |
220 | for (i = 0; i < ctx->blocksize / 4; i++) { | |
221 | *(ctx->outdata) = readl_relaxed( | |
222 | &device_data->base->dout); | |
223 | ctx->outdata += 4; | |
224 | ctx->outlen -= 4; | |
225 | } | |
226 | ||
227 | if (ctx->outlen == 0) { | |
228 | cryp_disable_irq_src(device_data, | |
229 | CRYP_IRQ_SRC_OUTPUT_FIFO); | |
230 | } | |
231 | } | |
232 | } else if (cryp_pending_irq_src(device_data, | |
233 | CRYP_IRQ_SRC_INPUT_FIFO)) { | |
234 | if (ctx->datalen / ctx->blocksize > 0) { | |
235 | for (i = 0 ; i < ctx->blocksize / 4; i++) { | |
236 | writel_relaxed(ctx->indata, | |
237 | &device_data->base->din); | |
238 | ctx->indata += 4; | |
239 | ctx->datalen -= 4; | |
240 | } | |
241 | ||
242 | if (ctx->datalen == 0) | |
243 | cryp_disable_irq_src(device_data, | |
244 | CRYP_IRQ_SRC_INPUT_FIFO); | |
245 | ||
246 | if (ctx->config.algomode == CRYP_ALGO_AES_XTS) { | |
247 | CRYP_PUT_BITS(&device_data->base->cr, | |
248 | CRYP_START_ENABLE, | |
249 | CRYP_CR_START_POS, | |
250 | CRYP_CR_START_MASK); | |
251 | ||
252 | cryp_wait_until_done(device_data); | |
253 | } | |
254 | } | |
255 | } | |
256 | ||
257 | return IRQ_HANDLED; | |
258 | } | |
259 | ||
260 | static int mode_is_aes(enum cryp_algo_mode mode) | |
261 | { | |
262 | return CRYP_ALGO_AES_ECB == mode || | |
263 | CRYP_ALGO_AES_CBC == mode || | |
264 | CRYP_ALGO_AES_CTR == mode || | |
265 | CRYP_ALGO_AES_XTS == mode; | |
266 | } | |
267 | ||
268 | static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right, | |
269 | enum cryp_init_vector_index index) | |
270 | { | |
271 | struct cryp_init_vector_value vector_value; | |
272 | ||
273 | dev_dbg(device_data->dev, "[%s]", __func__); | |
274 | ||
275 | vector_value.init_value_left = left; | |
276 | vector_value.init_value_right = right; | |
277 | ||
278 | return cryp_configure_init_vector(device_data, | |
279 | index, | |
280 | vector_value); | |
281 | } | |
282 | ||
283 | static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx) | |
284 | { | |
285 | int i; | |
286 | int status = 0; | |
287 | int num_of_regs = ctx->blocksize / 8; | |
288 | u32 iv[AES_BLOCK_SIZE / 4]; | |
289 | ||
290 | dev_dbg(device_data->dev, "[%s]", __func__); | |
291 | ||
292 | /* | |
293 | * Since we loop on num_of_regs we need to have a check in case | |
294 | * someone provides an incorrect blocksize which would force calling | |
295 | * cfg_iv with an index greater than 1, which is an error. | |
296 | */ | |
297 | if (num_of_regs > 2) { | |
298 | dev_err(device_data->dev, "[%s] Incorrect blocksize %d", | |
299 | __func__, ctx->blocksize); | |
300 | return -EINVAL; | |
301 | } | |
302 | ||
303 | for (i = 0; i < ctx->blocksize / 4; i++) | |
304 | iv[i] = uint8p_to_uint32_be(ctx->iv + i*4); | |
305 | ||
306 | for (i = 0; i < num_of_regs; i++) { | |
307 | status = cfg_iv(device_data, iv[i*2], iv[i*2+1], | |
308 | (enum cryp_init_vector_index) i); | |
309 | if (status != 0) | |
310 | return status; | |
311 | } | |
312 | return status; | |
313 | } | |
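/*
 * For AES (16-byte block) the IV fills two left/right register pairs
 * (index 0 and 1); for DES/3DES (8-byte block) only the first pair is used.
 */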
314 | ||
315 | static int set_key(struct cryp_device_data *device_data, | |
316 | u32 left_key, | |
317 | u32 right_key, | |
318 | enum cryp_key_reg_index index) | |
319 | { | |
320 | struct cryp_key_value key_value; | |
321 | int cryp_error; | |
322 | ||
323 | dev_dbg(device_data->dev, "[%s]", __func__); | |
324 | ||
325 | key_value.key_value_left = left_key; | |
326 | key_value.key_value_right = right_key; | |
327 | ||
328 | cryp_error = cryp_configure_key_values(device_data, | |
329 | index, | |
330 | key_value); | |
331 | if (cryp_error != 0) | |
332 | dev_err(device_data->dev, "[%s]: " | |
333 | "cryp_configure_key_values() failed!", __func__); | |
334 | ||
335 | return cryp_error; | |
336 | } | |
337 | ||
338 | static int cfg_keys(struct cryp_ctx *ctx) | |
339 | { | |
340 | int i; | |
341 | int num_of_regs = ctx->keylen / 8; | |
342 | u32 swapped_key[CRYP_MAX_KEY_SIZE / 4]; | |
343 | int cryp_error = 0; | |
344 | ||
345 | dev_dbg(ctx->device->dev, "[%s]", __func__); | |
346 | ||
347 | if (mode_is_aes(ctx->config.algomode)) { | |
348 | swap_words_in_key_and_bits_in_byte((u8 *)ctx->key, | |
349 | (u8 *)swapped_key, | |
350 | ctx->keylen); | |
351 | } else { | |
352 | for (i = 0; i < ctx->keylen / 4; i++) | |
353 | swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4); | |
354 | } | |
355 | ||
356 | for (i = 0; i < num_of_regs; i++) { | |
357 | cryp_error = set_key(ctx->device, | |
358 | *(((u32 *)swapped_key)+i*2), | |
359 | *(((u32 *)swapped_key)+i*2+1), | |
360 | (enum cryp_key_reg_index) i); | |
361 | ||
362 | if (cryp_error != 0) { | |
363 | dev_err(ctx->device->dev, "[%s]: set_key() failed!", | |
364 | __func__); | |
365 | return cryp_error; | |
366 | } | |
367 | } | |
368 | return cryp_error; | |
369 | } | |
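/*
 * num_of_regs follows the key length: 2 register pairs for AES-128, 3 for
 * AES-192 and 3DES, 4 for AES-256 and 1 for DES. For AES the key is also
 * word-reversed and bit-mirrored before it is written to the hardware.
 */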
370 | ||
371 | static int cryp_setup_context(struct cryp_ctx *ctx, | |
372 | struct cryp_device_data *device_data) | |
373 | { | |
374 | u32 control_register = CRYP_CR_DEFAULT; | |
375 | ||
376 | switch (cryp_mode) { | |
377 | case CRYP_MODE_INTERRUPT: | |
378 | writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc); | |
379 | break; | |
380 | ||
381 | case CRYP_MODE_DMA: | |
382 | writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr); | |
383 | break; | |
384 | ||
385 | default: | |
386 | break; | |
387 | } | |
388 | ||
389 | if (ctx->updated == 0) { | |
390 | cryp_flush_inoutfifo(device_data); | |
391 | if (cfg_keys(ctx) != 0) { | |
392 | dev_err(ctx->device->dev, "[%s]: cfg_keys failed!", | |
393 | __func__); | |
394 | return -EINVAL; | |
395 | } | |
396 | ||
397 | if (ctx->iv && | |
398 | CRYP_ALGO_AES_ECB != ctx->config.algomode && | |
399 | CRYP_ALGO_DES_ECB != ctx->config.algomode && | |
400 | CRYP_ALGO_TDES_ECB != ctx->config.algomode) { | |
401 | if (cfg_ivs(device_data, ctx) != 0) | |
402 | return -EPERM; | |
403 | } | |
404 | ||
405 | cryp_set_configuration(device_data, &ctx->config, | |
406 | &control_register); | |
407 | add_session_id(ctx); | |
408 | } else if (ctx->updated == 1 && | |
409 | ctx->session_id != atomic_read(&session_id)) { | |
410 | cryp_flush_inoutfifo(device_data); | |
411 | cryp_restore_device_context(device_data, &ctx->dev_ctx); | |
412 | ||
413 | add_session_id(ctx); | |
414 | control_register = ctx->dev_ctx.cr; | |
415 | } else | |
416 | control_register = ctx->dev_ctx.cr; | |
417 | ||
418 | writel(control_register | | |
419 | (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS), | |
420 | &device_data->base->cr); | |
421 | ||
422 | return 0; | |
423 | } | |
424 | ||
425 | static int cryp_get_device_data(struct cryp_ctx *ctx, | |
426 | struct cryp_device_data **device_data) | |
427 | { | |
428 | int ret; | |
429 | struct klist_iter device_iterator; | |
430 | struct klist_node *device_node; | |
431 | struct cryp_device_data *local_device_data = NULL; | |
432 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | |
433 | ||
434 | /* Wait until a device is available */ | |
435 | ret = down_interruptible(&driver_data.device_allocation); | |
436 | if (ret) | |
437 | return ret; /* Interrupted */ | |
438 | ||
439 | /* Select a device */ | |
440 | klist_iter_init(&driver_data.device_list, &device_iterator); | |
441 | ||
442 | device_node = klist_next(&device_iterator); | |
443 | while (device_node) { | |
444 | local_device_data = container_of(device_node, | |
445 | struct cryp_device_data, list_node); | |
446 | spin_lock(&local_device_data->ctx_lock); | |
447 | /* current_ctx allocates a device, NULL = unallocated */ | |
448 | if (local_device_data->current_ctx) { | |
449 | device_node = klist_next(&device_iterator); | |
450 | } else { | |
451 | local_device_data->current_ctx = ctx; | |
452 | ctx->device = local_device_data; | |
453 | spin_unlock(&local_device_data->ctx_lock); | |
454 | break; | |
455 | } | |
456 | spin_unlock(&local_device_data->ctx_lock); | |
457 | } | |
458 | klist_iter_exit(&device_iterator); | |
459 | ||
460 | if (!device_node) { | |
461 | /** | |
462 | * No free device found. | |
463 | * Since we allocated a device with down_interruptible, this | |
464 | * should not be able to happen. | |
465 | * Number of available devices, which are contained in | |
466 | * device_allocation, is therefore decremented by not doing | |
467 | * an up(device_allocation). | |
468 | */ | |
469 | return -EBUSY; | |
470 | } | |
471 | ||
472 | *device_data = local_device_data; | |
473 | ||
474 | return 0; | |
475 | } | |
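/*
 * Note: every successful call to cryp_get_device_data() must be balanced by
 * an up(&driver_data.device_allocation) once the caller is done with the
 * device, as ablk_crypt() and ablk_dma_crypt() do on their way out.
 */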
476 | ||
477 | static void cryp_dma_setup_channel(struct cryp_device_data *device_data, | |
478 | struct device *dev) | |
479 | { | |
480 | dma_cap_zero(device_data->dma.mask); | |
481 | dma_cap_set(DMA_SLAVE, device_data->dma.mask); | |
482 | ||
483 | device_data->dma.cfg_mem2cryp = mem_to_engine; | |
484 | device_data->dma.chan_mem2cryp = | |
485 | dma_request_channel(device_data->dma.mask, | |
486 | stedma40_filter, | |
487 | device_data->dma.cfg_mem2cryp); | |
488 | ||
489 | device_data->dma.cfg_cryp2mem = engine_to_mem; | |
490 | device_data->dma.chan_cryp2mem = | |
491 | dma_request_channel(device_data->dma.mask, | |
492 | stedma40_filter, | |
493 | device_data->dma.cfg_cryp2mem); | |
494 | ||
495 | init_completion(&device_data->dma.cryp_dma_complete); | |
496 | } | |
497 | ||
498 | static void cryp_dma_out_callback(void *data) | |
499 | { | |
500 | struct cryp_ctx *ctx = (struct cryp_ctx *) data; | |
501 | dev_dbg(ctx->device->dev, "[%s]: ", __func__); | |
502 | ||
503 | complete(&ctx->device->dma.cryp_dma_complete); | |
504 | } | |
505 | ||
506 | static int cryp_set_dma_transfer(struct cryp_ctx *ctx, | |
507 | struct scatterlist *sg, | |
508 | int len, | |
509 | enum dma_data_direction direction) | |
510 | { | |
511 | struct dma_async_tx_descriptor *desc; | |
512 | struct dma_chan *channel = NULL; | |
513 | dma_cookie_t cookie; | |
514 | ||
515 | dev_dbg(ctx->device->dev, "[%s]: ", __func__); | |
516 | ||
517 | if (unlikely(!IS_ALIGNED((u32)sg, 4))) { | |
518 | dev_err(ctx->device->dev, "[%s]: Data in sg list isn't " | |
519 | "aligned! Addr: 0x%08x", __func__, (u32)sg); | |
520 | return -EFAULT; | |
521 | } | |
522 | ||
523 | switch (direction) { | |
524 | case DMA_TO_DEVICE: | |
525 | channel = ctx->device->dma.chan_mem2cryp; | |
526 | ctx->device->dma.sg_src = sg; | |
527 | ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev, | |
528 | ctx->device->dma.sg_src, | |
529 | ctx->device->dma.nents_src, | |
530 | direction); | |
531 | ||
532 | if (!ctx->device->dma.sg_src_len) { | |
533 | dev_dbg(ctx->device->dev, | |
534 | "[%s]: Could not map the sg list (TO_DEVICE)", | |
535 | __func__); | |
536 | return -EFAULT; | |
537 | } | |
538 | ||
539 | dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " | |
540 | "(TO_DEVICE)", __func__); | |
541 | ||
542 | desc = channel->device->device_prep_slave_sg(channel, | |
543 | ctx->device->dma.sg_src, | |
544 | ctx->device->dma.sg_src_len, | |
545 | direction, DMA_CTRL_ACK, NULL); |
546 | break; |
547 | ||
548 | case DMA_FROM_DEVICE: | |
549 | channel = ctx->device->dma.chan_cryp2mem; | |
550 | ctx->device->dma.sg_dst = sg; | |
551 | ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev, | |
552 | ctx->device->dma.sg_dst, | |
553 | ctx->device->dma.nents_dst, | |
554 | direction); | |
555 | ||
556 | if (!ctx->device->dma.sg_dst_len) { | |
557 | dev_dbg(ctx->device->dev, | |
558 | "[%s]: Could not map the sg list (FROM_DEVICE)", | |
559 | __func__); | |
560 | return -EFAULT; | |
561 | } | |
562 | ||
563 | dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " | |
564 | "(FROM_DEVICE)", __func__); | |
565 | ||
566 | desc = channel->device->device_prep_slave_sg(channel, | |
567 | ctx->device->dma.sg_dst, | |
568 | ctx->device->dma.sg_dst_len, | |
569 | direction, | |
570 | DMA_CTRL_ACK | | |
571 | DMA_PREP_INTERRUPT, NULL); |
572 | |
573 | desc->callback = cryp_dma_out_callback; | |
574 | desc->callback_param = ctx; | |
575 | break; | |
576 | ||
577 | default: | |
578 | dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction", | |
579 | __func__); | |
580 | return -EFAULT; | |
581 | } | |
582 | ||
583 | cookie = desc->tx_submit(desc); | |
584 | dma_async_issue_pending(channel); | |
585 | ||
586 | return 0; | |
587 | } | |
588 | ||
589 | static void cryp_dma_done(struct cryp_ctx *ctx) | |
590 | { | |
591 | struct dma_chan *chan; | |
592 | ||
593 | dev_dbg(ctx->device->dev, "[%s]: ", __func__); | |
594 | ||
595 | chan = ctx->device->dma.chan_mem2cryp; | |
596 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | |
597 | dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, | |
598 | ctx->device->dma.sg_src_len, DMA_TO_DEVICE); | |
599 | ||
600 | chan = ctx->device->dma.chan_cryp2mem; | |
601 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | |
602 | dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, | |
603 | ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); | |
604 | } | |
605 | ||
606 | static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg, | |
607 | int len) | |
608 | { | |
609 | int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); | |
610 | dev_dbg(ctx->device->dev, "[%s]: ", __func__); | |
611 | ||
612 | if (error) { | |
613 | dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " | |
614 | "failed", __func__); | |
615 | return error; | |
616 | } | |
617 | ||
618 | return len; | |
619 | } | |
620 | ||
621 | static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len) | |
622 | { | |
623 | int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE); | |
624 | if (error) { | |
625 | dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " | |
626 | "failed", __func__); | |
627 | return error; | |
628 | } | |
629 | ||
630 | return len; | |
631 | } | |
632 | ||
633 | static void cryp_polling_mode(struct cryp_ctx *ctx, | |
634 | struct cryp_device_data *device_data) | |
635 | { | |
636 | int len = ctx->blocksize / BYTES_PER_WORD; | |
637 | int remaining_length = ctx->datalen; | |
638 | u32 *indata = (u32 *)ctx->indata; | |
639 | u32 *outdata = (u32 *)ctx->outdata; | |
640 | ||
641 | while (remaining_length > 0) { | |
642 | writesl(&device_data->base->din, indata, len); | |
643 | indata += len; | |
644 | remaining_length -= (len * BYTES_PER_WORD); | |
645 | cryp_wait_until_done(device_data); | |
646 | ||
647 | readsl(&device_data->base->dout, outdata, len); | |
648 | outdata += len; | |
649 | cryp_wait_until_done(device_data); | |
650 | } | |
651 | } | |
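/*
 * Each iteration moves one block: e.g. for AES, 4 words (16 bytes) are
 * written to DIN, the engine is polled via cryp_wait_until_done(), and
 * 4 words are read back from DOUT.
 */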
652 | ||
653 | static int cryp_disable_power(struct device *dev, | |
654 | struct cryp_device_data *device_data, | |
655 | bool save_device_context) | |
656 | { | |
657 | int ret = 0; | |
658 | ||
659 | dev_dbg(dev, "[%s]", __func__); | |
660 | ||
661 | spin_lock(&device_data->power_state_spinlock); | |
662 | if (!device_data->power_state) | |
663 | goto out; | |
664 | ||
665 | spin_lock(&device_data->ctx_lock); | |
666 | if (save_device_context && device_data->current_ctx) { | |
667 | cryp_save_device_context(device_data, | |
668 | &device_data->current_ctx->dev_ctx, | |
669 | cryp_mode); | |
670 | device_data->restore_dev_ctx = true; | |
671 | } | |
672 | spin_unlock(&device_data->ctx_lock); | |
673 | ||
674 | clk_disable(device_data->clk); | |
675 | ret = regulator_disable(device_data->pwr_regulator); | |
676 | if (ret) | |
677 | dev_err(dev, "[%s]: " | |
678 | "regulator_disable() failed!", | |
679 | __func__); | |
680 | ||
681 | device_data->power_state = false; | |
682 | ||
683 | out: | |
684 | spin_unlock(&device_data->power_state_spinlock); | |
685 | ||
686 | return ret; | |
687 | } | |
688 | ||
689 | static int cryp_enable_power( | |
690 | struct device *dev, | |
691 | struct cryp_device_data *device_data, | |
692 | bool restore_device_context) | |
693 | { | |
694 | int ret = 0; | |
695 | ||
696 | dev_dbg(dev, "[%s]", __func__); | |
697 | ||
698 | spin_lock(&device_data->power_state_spinlock); | |
699 | if (!device_data->power_state) { | |
700 | ret = regulator_enable(device_data->pwr_regulator); | |
701 | if (ret) { | |
702 | dev_err(dev, "[%s]: regulator_enable() failed!", | |
703 | __func__); | |
704 | goto out; | |
705 | } | |
706 | ||
707 | ret = clk_enable(device_data->clk); | |
708 | if (ret) { | |
709 | dev_err(dev, "[%s]: clk_enable() failed!", | |
710 | __func__); | |
711 | regulator_disable(device_data->pwr_regulator); | |
712 | goto out; | |
713 | } | |
714 | device_data->power_state = true; | |
715 | } | |
716 | ||
717 | if (device_data->restore_dev_ctx) { | |
718 | spin_lock(&device_data->ctx_lock); | |
719 | if (restore_device_context && device_data->current_ctx) { | |
720 | device_data->restore_dev_ctx = false; | |
721 | cryp_restore_device_context(device_data, | |
722 | &device_data->current_ctx->dev_ctx); | |
723 | } | |
724 | spin_unlock(&device_data->ctx_lock); | |
725 | } | |
726 | out: | |
727 | spin_unlock(&device_data->power_state_spinlock); | |
728 | ||
729 | return ret; | |
730 | } | |
731 | ||
732 | static int hw_crypt_noxts(struct cryp_ctx *ctx, | |
733 | struct cryp_device_data *device_data) | |
734 | { | |
735 | int ret = 0; | |
736 | ||
737 | const u8 *indata = ctx->indata; | |
738 | u8 *outdata = ctx->outdata; | |
739 | u32 datalen = ctx->datalen; | |
740 | u32 outlen = datalen; | |
741 | ||
742 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | |
743 | ||
744 | ctx->outlen = ctx->datalen; | |
745 | ||
746 | if (unlikely(!IS_ALIGNED((u32)indata, 4))) { | |
747 | pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: " | |
748 | "0x%08x", __func__, (u32)indata); | |
749 | return -EINVAL; | |
750 | } | |
751 | ||
752 | ret = cryp_setup_context(ctx, device_data); | |
753 | ||
754 | if (ret) | |
755 | goto out; | |
756 | ||
757 | if (cryp_mode == CRYP_MODE_INTERRUPT) { | |
758 | cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO | | |
759 | CRYP_IRQ_SRC_OUTPUT_FIFO); | |
760 | ||
761 | /* | |
762 | * ctx->outlen is decremented in the cryp_interrupt_handler | |
763 | * function. We had to add cpu_relax() (barrier) to make sure | |
764 | * that gcc didn't optimize away this variable. | |
765 | */ | |
766 | while (ctx->outlen > 0) | |
767 | cpu_relax(); | |
768 | } else if (cryp_mode == CRYP_MODE_POLLING || | |
769 | cryp_mode == CRYP_MODE_DMA) { | |
770 | /* | |
771 | * The reason for having DMA in this if-case is that when running | |
772 | * cryp_mode = 2 (DMA), we use separate DMA routines to handle | |
773 | * cipher-/plaintext larger than the blocksize, except for the plain | |
774 | * CRYPTO_ALG_TYPE_CIPHER case, where we still use polling mode: the | |
775 | * overhead of setting up DMA eats up the benefit of using it. | |
777 | */ | |
778 | cryp_polling_mode(ctx, device_data); | |
779 | } else { | |
780 | dev_err(ctx->device->dev, "[%s]: Invalid operation mode!", | |
781 | __func__); | |
782 | ret = -EPERM; | |
783 | goto out; | |
784 | } | |
785 | ||
786 | cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode); | |
787 | ctx->updated = 1; | |
788 | ||
789 | out: | |
790 | ctx->indata = indata; | |
791 | ctx->outdata = outdata; | |
792 | ctx->datalen = datalen; | |
793 | ctx->outlen = outlen; | |
794 | ||
795 | return ret; | |
796 | } | |
797 | ||
798 | static int get_nents(struct scatterlist *sg, int nbytes) | |
799 | { | |
800 | int nents = 0; | |
801 | ||
802 | while (nbytes > 0) { | |
803 | nbytes -= sg->length; | |
804 | sg = scatterwalk_sg_next(sg); | |
805 | nents++; | |
806 | } | |
807 | ||
808 | return nents; | |
809 | } | |
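/*
 * E.g. a 256-byte request spread over scatterlist entries of 128 + 64 + 64
 * bytes gives nents = 3.
 */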
810 | ||
811 | static int ablk_dma_crypt(struct ablkcipher_request *areq) | |
812 | { | |
813 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | |
814 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); | |
815 | struct cryp_device_data *device_data; | |
816 | ||
817 | int bytes_written = 0; | |
818 | int bytes_read = 0; | |
819 | int ret; | |
820 | ||
821 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | |
822 | ||
823 | ctx->datalen = areq->nbytes; | |
824 | ctx->outlen = areq->nbytes; | |
825 | ||
826 | ret = cryp_get_device_data(ctx, &device_data); | |
827 | if (ret) | |
828 | return ret; | |
829 | ||
830 | ret = cryp_setup_context(ctx, device_data); | |
831 | if (ret) | |
832 | goto out; | |
833 | ||
834 | /* We have the device now, so store the nents in the dma struct. */ | |
835 | ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen); | |
836 | ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen); | |
837 | ||
838 | /* Enable DMA in- and output. */ | |
839 | cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS); | |
840 | ||
841 | bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen); | |
842 | bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written); | |
843 | ||
844 | wait_for_completion(&ctx->device->dma.cryp_dma_complete); | |
845 | cryp_dma_done(ctx); | |
846 | ||
847 | cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode); | |
848 | ctx->updated = 1; | |
849 | ||
850 | out: | |
851 | spin_lock(&device_data->ctx_lock); | |
852 | device_data->current_ctx = NULL; | |
853 | ctx->device = NULL; | |
854 | spin_unlock(&device_data->ctx_lock); | |
855 | ||
856 | /* | |
857 | * The down_interruptible part for this semaphore is called in | |
858 | * cryp_get_device_data. | |
859 | */ | |
860 | up(&driver_data.device_allocation); | |
861 | ||
862 | if (unlikely(bytes_written != bytes_read)) | |
863 | return -EPERM; | |
864 | ||
865 | return 0; | |
866 | } | |
867 | ||
868 | static int ablk_crypt(struct ablkcipher_request *areq) | |
869 | { | |
870 | struct ablkcipher_walk walk; | |
871 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | |
872 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); | |
873 | struct cryp_device_data *device_data; | |
874 | unsigned long src_paddr; | |
875 | unsigned long dst_paddr; | |
876 | int ret; | |
877 | int nbytes; | |
878 | ||
879 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | |
880 | ||
881 | ret = cryp_get_device_data(ctx, &device_data); | |
882 | if (ret) | |
883 | return ret; |
884 | ||
885 | ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes); | |
886 | ret = ablkcipher_walk_phys(areq, &walk); | |
887 | ||
888 | if (ret) { | |
889 | pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!", | |
890 | __func__); | |
891 | goto out; | |
892 | } | |
893 | ||
894 | while ((nbytes = walk.nbytes) > 0) { | |
895 | ctx->iv = walk.iv; | |
896 | src_paddr = (page_to_phys(walk.src.page) + walk.src.offset); | |
897 | ctx->indata = phys_to_virt(src_paddr); | |
898 | ||
899 | dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset); | |
900 | ctx->outdata = phys_to_virt(dst_paddr); | |
901 | ||
902 | ctx->datalen = nbytes - (nbytes % ctx->blocksize); | |
903 | ||
904 | ret = hw_crypt_noxts(ctx, device_data); | |
905 | if (ret) | |
906 | goto out; | |
907 | ||
908 | nbytes -= ctx->datalen; | |
909 | ret = ablkcipher_walk_done(areq, &walk, nbytes); | |
910 | if (ret) | |
911 | goto out; | |
912 | } | |
913 | ablkcipher_walk_complete(&walk); | |
914 | ||
915 | out: | |
916 | /* Release the device */ | |
917 | spin_lock(&device_data->ctx_lock); | |
918 | device_data->current_ctx = NULL; | |
919 | ctx->device = NULL; | |
920 | spin_unlock(&device_data->ctx_lock); | |
921 | ||
922 | /* | |
923 | * The down_interruptible part for this semaphore is called in | |
924 | * cryp_get_device_data. | |
925 | */ | |
926 | up(&driver_data.device_allocation); | |
927 | ||
928 | return ret; | |
929 | } | |
930 | ||
931 | static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |
932 | const u8 *key, unsigned int keylen) | |
933 | { | |
934 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); | |
935 | u32 *flags = &cipher->base.crt_flags; | |
936 | ||
937 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | |
938 | ||
939 | switch (keylen) { | |
940 | case AES_KEYSIZE_128: | |
941 | ctx->config.keysize = CRYP_KEY_SIZE_128; | |
942 | break; | |
943 | ||
944 | case AES_KEYSIZE_192: | |
945 | ctx->config.keysize = CRYP_KEY_SIZE_192; | |
946 | break; | |
947 | ||
948 | case AES_KEYSIZE_256: | |
949 | ctx->config.keysize = CRYP_KEY_SIZE_256; | |
950 | break; | |
951 | ||
952 | default: | |
953 | pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__); | |
954 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | |
955 | return -EINVAL; | |
956 | } | |
957 | ||
958 | memcpy(ctx->key, key, keylen); | |
959 | ctx->keylen = keylen; | |
960 | ||
961 | ctx->updated = 0; | |
962 | ||
963 | return 0; | |
964 | } | |
965 | ||
966 | static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |
967 | const u8 *key, unsigned int keylen) | |
968 | { | |
969 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); | |
970 | u32 *flags = &cipher->base.crt_flags; | |
971 | u32 tmp[DES_EXPKEY_WORDS]; | |
972 | int ret; | |
973 | ||
974 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | |
975 | if (keylen != DES_KEY_SIZE) { | |
976 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | |
977 | pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", | |
978 | __func__); | |
979 | return -EINVAL; | |
980 | } | |
981 | ||
982 | ret = des_ekey(tmp, key); | |
983 | if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | |
984 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; | |
985 | pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", | |
986 | __func__); | |
987 | return -EINVAL; | |
988 | } | |
989 | ||
990 | memcpy(ctx->key, key, keylen); | |
991 | ctx->keylen = keylen; | |
992 | ||
993 | ctx->updated = 0; | |
994 | return 0; | |
995 | } | |
996 | ||
997 | static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |
998 | const u8 *key, unsigned int keylen) | |
999 | { | |
1000 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); | |
1001 | u32 *flags = &cipher->base.crt_flags; | |
1002 | const u32 *K = (const u32 *)key; | |
1003 | u32 tmp[DES3_EDE_EXPKEY_WORDS]; | |
1004 | int i, ret; | |
1005 | ||
1006 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | |
1007 | if (keylen != DES3_EDE_KEY_SIZE) { | |
1008 | *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | |
1009 | pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN", | |
1010 | __func__); | |
1011 | return -EINVAL; | |
1012 | } | |
1013 | ||
1014 | /* Checking key interdependency for weak key detection: if K1 == K2 | |
1014 | or K2 == K3, the EDE construction collapses into single DES. */ | |
1015 | if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || | |
1016 | !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && | |
1017 | (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | |
1018 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; | |
1019 | pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", | |
1020 | __func__); | |
1021 | return -EINVAL; | |
1022 | } | |
1023 | for (i = 0; i < 3; i++) { | |
1024 | ret = des_ekey(tmp, key + i*DES_KEY_SIZE); | |
1025 | if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | |
1026 | *flags |= CRYPTO_TFM_RES_WEAK_KEY; | |
1027 | pr_debug(DEV_DBG_NAME " [%s]: " | |
1028 | "CRYPTO_TFM_REQ_WEAK_KEY", __func__); | |
1029 | return -EINVAL; | |
1030 | } | |
1031 | } | |
1032 | ||
1033 | memcpy(ctx->key, key, keylen); | |
1034 | ctx->keylen = keylen; | |
1035 | ||
1036 | ctx->updated = 0; | |
1037 | return 0; | |
1038 | } | |
1039 | ||
1040 | static int cryp_blk_encrypt(struct ablkcipher_request *areq) | |
1041 | { | |
1042 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | |
1043 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); | |
1044 | ||
1045 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | |
1046 | ||
1047 | ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; | |
1048 | ||
1049 | /* DMA does not work for DES due to a hw bug */ | |
1051 | if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode)) | |
1052 | return ablk_dma_crypt(areq); | |
1053 | ||
1054 | /* For everything except DMA, we run the non DMA version. */ | |
1055 | return ablk_crypt(areq); | |
1056 | } | |
1057 | ||
1058 | static int cryp_blk_decrypt(struct ablkcipher_request *areq) | |
1059 | { | |
1060 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | |
1061 | struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); | |
1062 | ||
1063 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | |
1064 | ||
1065 | ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; | |
1066 | ||
1067 | /* DMA does not work for DES due to a hw bug */ | |
1068 | if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode)) | |
1069 | return ablk_dma_crypt(areq); | |
1070 | ||
1071 | /* For everything except DMA, we run the non DMA version. */ | |
1072 | return ablk_crypt(areq); | |
1073 | } | |
1074 | ||
1075 | struct cryp_algo_template { | |
1076 | enum cryp_algo_mode algomode; | |
1077 | struct crypto_alg crypto; | |
1078 | }; | |
1079 | ||
1080 | static int cryp_cra_init(struct crypto_tfm *tfm) | |
1081 | { | |
1082 | struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); | |
1083 | struct crypto_alg *alg = tfm->__crt_alg; | |
1084 | struct cryp_algo_template *cryp_alg = container_of(alg, | |
1085 | struct cryp_algo_template, | |
1086 | crypto); | |
1087 | ||
1088 | ctx->config.algomode = cryp_alg->algomode; | |
1089 | ctx->blocksize = crypto_tfm_alg_blocksize(tfm); | |
1090 | ||
1091 | return 0; | |
1092 | } | |
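/*
 * cra_init() recovers the per-algorithm template with container_of() on
 * tfm->__crt_alg, so each entry in cryp_algs[] below only has to state its
 * CRYP_ALGO_* mode next to the generic crypto_alg definition.
 */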
1093 | ||
1094 | static struct cryp_algo_template cryp_algs[] = { | |
1095 | { | |
1096 | .algomode = CRYP_ALGO_AES_ECB, | |
1097 | .crypto = { | |
1098 | .cra_name = "aes", | |
1099 | .cra_driver_name = "aes-ux500", | |
1100 | .cra_priority = 300, | |
1101 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
1102 | CRYPTO_ALG_ASYNC, | |
1103 | .cra_blocksize = AES_BLOCK_SIZE, | |
1104 | .cra_ctxsize = sizeof(struct cryp_ctx), | |
1105 | .cra_alignmask = 3, | |
1106 | .cra_type = &crypto_ablkcipher_type, | |
1107 | .cra_init = cryp_cra_init, | |
1108 | .cra_module = THIS_MODULE, | |
1109 | .cra_u = { | |
1110 | .ablkcipher = { | |
1111 | .min_keysize = AES_MIN_KEY_SIZE, | |
1112 | .max_keysize = AES_MAX_KEY_SIZE, | |
1113 | .setkey = aes_ablkcipher_setkey, | |
1114 | .encrypt = cryp_blk_encrypt, | |
1115 | .decrypt = cryp_blk_decrypt | |
1116 | } | |
1117 | } | |
1118 | } | |
1119 | }, | |
1120 | { | |
1121 | .algomode = CRYP_ALGO_AES_ECB, | |
1122 | .crypto = { | |
1123 | .cra_name = "ecb(aes)", | |
1124 | .cra_driver_name = "ecb-aes-ux500", | |
1125 | .cra_priority = 300, | |
1126 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
1127 | CRYPTO_ALG_ASYNC, | |
1128 | .cra_blocksize = AES_BLOCK_SIZE, | |
1129 | .cra_ctxsize = sizeof(struct cryp_ctx), | |
1130 | .cra_alignmask = 3, | |
1131 | .cra_type = &crypto_ablkcipher_type, | |
1132 | .cra_init = cryp_cra_init, | |
1133 | .cra_module = THIS_MODULE, | |
1134 | .cra_u = { | |
1135 | .ablkcipher = { | |
1136 | .min_keysize = AES_MIN_KEY_SIZE, | |
1137 | .max_keysize = AES_MAX_KEY_SIZE, | |
1138 | .setkey = aes_ablkcipher_setkey, | |
1139 | .encrypt = cryp_blk_encrypt, | |
1140 | .decrypt = cryp_blk_decrypt, | |
1141 | } | |
1142 | } | |
1143 | } | |
1144 | }, | |
1145 | { | |
1146 | .algomode = CRYP_ALGO_AES_CBC, | |
1147 | .crypto = { | |
1148 | .cra_name = "cbc(aes)", | |
1149 | .cra_driver_name = "cbc-aes-ux500", | |
1150 | .cra_priority = 300, | |
1151 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
1152 | CRYPTO_ALG_ASYNC, | |
1153 | .cra_blocksize = AES_BLOCK_SIZE, | |
1154 | .cra_ctxsize = sizeof(struct cryp_ctx), | |
1155 | .cra_alignmask = 3, | |
1156 | .cra_type = &crypto_ablkcipher_type, | |
1157 | .cra_init = cryp_cra_init, | |
1158 | .cra_module = THIS_MODULE, | |
1159 | .cra_u = { | |
1160 | .ablkcipher = { | |
1161 | .min_keysize = AES_MIN_KEY_SIZE, | |
1162 | .max_keysize = AES_MAX_KEY_SIZE, | |
1163 | .setkey = aes_ablkcipher_setkey, | |
1164 | .encrypt = cryp_blk_encrypt, | |
1165 | .decrypt = cryp_blk_decrypt, | |
1166 | .ivsize = AES_BLOCK_SIZE, | |
1167 | } | |
1168 | } | |
1169 | } | |
1170 | }, | |
1171 | { | |
1172 | .algomode = CRYP_ALGO_AES_CTR, | |
1173 | .crypto = { | |
1174 | .cra_name = "ctr(aes)", | |
1175 | .cra_driver_name = "ctr-aes-ux500", | |
1176 | .cra_priority = 300, | |
1177 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
1178 | CRYPTO_ALG_ASYNC, | |
1179 | .cra_blocksize = AES_BLOCK_SIZE, | |
1180 | .cra_ctxsize = sizeof(struct cryp_ctx), | |
1181 | .cra_alignmask = 3, | |
1182 | .cra_type = &crypto_ablkcipher_type, | |
1183 | .cra_init = cryp_cra_init, | |
1184 | .cra_module = THIS_MODULE, | |
1185 | .cra_u = { | |
1186 | .ablkcipher = { | |
1187 | .min_keysize = AES_MIN_KEY_SIZE, | |
1188 | .max_keysize = AES_MAX_KEY_SIZE, | |
1189 | .setkey = aes_ablkcipher_setkey, | |
1190 | .encrypt = cryp_blk_encrypt, | |
1191 | .decrypt = cryp_blk_decrypt, | |
1192 | .ivsize = AES_BLOCK_SIZE, | |
1193 | } | |
1194 | } | |
1195 | } | |
1196 | }, | |
1197 | { | |
1198 | .algomode = CRYP_ALGO_DES_ECB, | |
1199 | .crypto = { | |
1200 | .cra_name = "des", | |
1201 | .cra_driver_name = "des-ux500", | |
1202 | .cra_priority = 300, | |
1203 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
1204 | CRYPTO_ALG_ASYNC, | |
1205 | .cra_blocksize = DES_BLOCK_SIZE, | |
1206 | .cra_ctxsize = sizeof(struct cryp_ctx), | |
1207 | .cra_alignmask = 3, | |
1208 | .cra_type = &crypto_ablkcipher_type, | |
1209 | .cra_init = cryp_cra_init, | |
1210 | .cra_module = THIS_MODULE, | |
1211 | .cra_u = { | |
1212 | .ablkcipher = { | |
1213 | .min_keysize = DES_KEY_SIZE, | |
1214 | .max_keysize = DES_KEY_SIZE, | |
1215 | .setkey = des_ablkcipher_setkey, | |
1216 | .encrypt = cryp_blk_encrypt, | |
1217 | .decrypt = cryp_blk_decrypt | |
1218 | } | |
1219 | } | |
1220 | } | |
1221 | ||
1222 | }, | |
1223 | { | |
1224 | .algomode = CRYP_ALGO_TDES_ECB, | |
1225 | .crypto = { | |
1226 | .cra_name = "des3_ede", | |
1227 | .cra_driver_name = "des3_ede-ux500", | |
1228 | .cra_priority = 300, | |
1229 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
1230 | CRYPTO_ALG_ASYNC, | |
1231 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | |
1232 | .cra_ctxsize = sizeof(struct cryp_ctx), | |
1233 | .cra_alignmask = 3, | |
1234 | .cra_type = &crypto_ablkcipher_type, | |
1235 | .cra_init = cryp_cra_init, | |
1236 | .cra_module = THIS_MODULE, | |
1237 | .cra_u = { | |
1238 | .ablkcipher = { | |
1239 | .min_keysize = DES3_EDE_KEY_SIZE, | |
1240 | .max_keysize = DES3_EDE_KEY_SIZE, | |
1241 | .setkey = des_ablkcipher_setkey, | |
1242 | .encrypt = cryp_blk_encrypt, | |
1243 | .decrypt = cryp_blk_decrypt | |
1244 | } | |
1245 | } | |
1246 | } | |
1247 | }, | |
1248 | { | |
1249 | .algomode = CRYP_ALGO_DES_ECB, | |
1250 | .crypto = { | |
1251 | .cra_name = "ecb(des)", | |
1252 | .cra_driver_name = "ecb-des-ux500", | |
1253 | .cra_priority = 300, | |
1254 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
1255 | CRYPTO_ALG_ASYNC, | |
1256 | .cra_blocksize = DES_BLOCK_SIZE, | |
1257 | .cra_ctxsize = sizeof(struct cryp_ctx), | |
1258 | .cra_alignmask = 3, | |
1259 | .cra_type = &crypto_ablkcipher_type, | |
1260 | .cra_init = cryp_cra_init, | |
1261 | .cra_module = THIS_MODULE, | |
1262 | .cra_u = { | |
1263 | .ablkcipher = { | |
1264 | .min_keysize = DES_KEY_SIZE, | |
1265 | .max_keysize = DES_KEY_SIZE, | |
1266 | .setkey = des_ablkcipher_setkey, | |
1267 | .encrypt = cryp_blk_encrypt, | |
1268 | .decrypt = cryp_blk_decrypt, | |
1269 | } | |
1270 | } | |
1271 | } | |
1272 | }, | |
1273 | { | |
1274 | .algomode = CRYP_ALGO_TDES_ECB, | |
1275 | .crypto = { | |
1276 | .cra_name = "ecb(des3_ede)", | |
1277 | .cra_driver_name = "ecb-des3_ede-ux500", | |
1278 | .cra_priority = 300, | |
1279 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
1280 | CRYPTO_ALG_ASYNC, | |
1281 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | |
1282 | .cra_ctxsize = sizeof(struct cryp_ctx), | |
1283 | .cra_alignmask = 3, | |
1284 | .cra_type = &crypto_ablkcipher_type, | |
1285 | .cra_init = cryp_cra_init, | |
1286 | .cra_module = THIS_MODULE, | |
1287 | .cra_u = { | |
1288 | .ablkcipher = { | |
1289 | .min_keysize = DES3_EDE_KEY_SIZE, | |
1290 | .max_keysize = DES3_EDE_KEY_SIZE, | |
1291 | .setkey = des3_ablkcipher_setkey, | |
1292 | .encrypt = cryp_blk_encrypt, | |
1293 | .decrypt = cryp_blk_decrypt, | |
1294 | } | |
1295 | } | |
1296 | } | |
1297 | }, | |
1298 | { | |
1299 | .algomode = CRYP_ALGO_DES_CBC, | |
1300 | .crypto = { | |
1301 | .cra_name = "cbc(des)", | |
1302 | .cra_driver_name = "cbc-des-ux500", | |
1303 | .cra_priority = 300, | |
1304 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
1305 | CRYPTO_ALG_ASYNC, | |
1306 | .cra_blocksize = DES_BLOCK_SIZE, | |
1307 | .cra_ctxsize = sizeof(struct cryp_ctx), | |
1308 | .cra_alignmask = 3, | |
1309 | .cra_type = &crypto_ablkcipher_type, | |
1310 | .cra_init = cryp_cra_init, | |
1311 | .cra_module = THIS_MODULE, | |
1312 | .cra_u = { | |
1313 | .ablkcipher = { | |
1314 | .min_keysize = DES_KEY_SIZE, | |
1315 | .max_keysize = DES_KEY_SIZE, | |
1316 | .setkey = des_ablkcipher_setkey, | |
1317 | .encrypt = cryp_blk_encrypt, | |
1318 | .decrypt = cryp_blk_decrypt, | |
1319 | } | |
1320 | } | |
1321 | } | |
1322 | }, | |
1323 | { | |
1324 | .algomode = CRYP_ALGO_TDES_CBC, | |
1325 | .crypto = { | |
1326 | .cra_name = "cbc(des3_ede)", | |
1327 | .cra_driver_name = "cbc-des3_ede-ux500", | |
1328 | .cra_priority = 300, | |
1329 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | |
1330 | CRYPTO_ALG_ASYNC, | |
1331 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | |
1332 | .cra_ctxsize = sizeof(struct cryp_ctx), | |
1333 | .cra_alignmask = 3, | |
1334 | .cra_type = &crypto_ablkcipher_type, | |
1335 | .cra_init = cryp_cra_init, | |
1336 | .cra_module = THIS_MODULE, | |
1337 | .cra_u = { | |
1338 | .ablkcipher = { | |
1339 | .min_keysize = DES3_EDE_KEY_SIZE, | |
1340 | .max_keysize = DES3_EDE_KEY_SIZE, | |
1341 | .setkey = des3_ablkcipher_setkey, | |
1342 | .encrypt = cryp_blk_encrypt, | |
1343 | .decrypt = cryp_blk_decrypt, | |
1344 | .ivsize = DES3_EDE_BLOCK_SIZE, | |
1345 | } | |
1346 | } | |
1347 | } | |
1348 | } | |
1349 | }; | |
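/*
 * Once registered, these transforms are reachable through the regular
 * kernel crypto API. A minimal, purely illustrative client (not part of
 * this driver) would do roughly:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *		...
 *		crypto_free_ablkcipher(tfm);
 *	}
 */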
1350 | ||
1351 | /** | |
1352 | * cryp_algs_register_all - register all supported cipher algorithms | |
1353 | */ | |
1354 | static int cryp_algs_register_all(void) | |
1355 | { | |
1356 | int ret; | |
1357 | int i; | |
1358 | int count; | |
1359 | ||
1360 | pr_debug("[%s]", __func__); | |
1361 | ||
1362 | for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) { | |
1363 | ret = crypto_register_alg(&cryp_algs[i].crypto); | |
1364 | if (ret) { | |
1365 | count = i; | |
1366 | pr_err("[%s] alg registration failed", | |
1367 | cryp_algs[i].crypto.cra_driver_name); | |
1368 | goto unreg; | |
1369 | } | |
1370 | } | |
1371 | return 0; | |
1372 | unreg: | |
1373 | for (i = 0; i < count; i++) | |
1374 | crypto_unregister_alg(&cryp_algs[i].crypto); | |
1375 | return ret; | |
1376 | } | |
1377 | ||
1378 | /** | |
1379 | * cryp_algs_unregister_all - unregister all supported cipher algorithms | |
1380 | */ | |
1381 | static void cryp_algs_unregister_all(void) | |
1382 | { | |
1383 | int i; | |
1384 | ||
1385 | pr_debug(DEV_DBG_NAME " [%s]", __func__); | |
1386 | ||
1387 | for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) | |
1388 | crypto_unregister_alg(&cryp_algs[i].crypto); | |
1389 | } | |
1390 | ||
1391 | static int ux500_cryp_probe(struct platform_device *pdev) | |
1392 | { | |
1393 | int ret; | |
1394 | int cryp_error = 0; | |
1395 | struct resource *res = NULL; | |
1396 | struct resource *res_irq = NULL; | |
1397 | struct cryp_device_data *device_data; | |
1398 | struct cryp_protection_config prot = { | |
1399 | .privilege_access = CRYP_STATE_ENABLE | |
1400 | }; | |
1401 | struct device *dev = &pdev->dev; | |
1402 | ||
1403 | dev_dbg(dev, "[%s]", __func__); | |
1404 | device_data = kzalloc(sizeof(struct cryp_device_data), GFP_ATOMIC); | |
1405 | if (!device_data) { | |
1406 | dev_err(dev, "[%s]: kzalloc() failed!", __func__); | |
1407 | ret = -ENOMEM; | |
1408 | goto out; | |
1409 | } | |
1410 | ||
1411 | device_data->dev = dev; | |
1412 | device_data->current_ctx = NULL; | |
1413 | ||
1414 | /* Grab the DMA configuration from platform data. */ | |
1415 | mem_to_engine = &((struct cryp_platform_data *) | |
1416 | dev->platform_data)->mem_to_engine; | |
1417 | engine_to_mem = &((struct cryp_platform_data *) | |
1418 | dev->platform_data)->engine_to_mem; | |
1419 | ||
1420 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1421 | if (!res) { | |
1422 | dev_err(dev, "[%s]: platform_get_resource() failed", | |
1423 | __func__); | |
1424 | ret = -ENODEV; | |
1425 | goto out_kfree; | |
1426 | } | |
1427 | ||
1428 | res = request_mem_region(res->start, resource_size(res), pdev->name); | |
1429 | if (res == NULL) { | |
1430 | dev_err(dev, "[%s]: request_mem_region() failed", | |
1431 | __func__); | |
1432 | ret = -EBUSY; | |
1433 | goto out_kfree; | |
1434 | } | |
1435 | ||
1436 | device_data->base = ioremap(res->start, resource_size(res)); | |
1437 | if (!device_data->base) { | |
1438 | dev_err(dev, "[%s]: ioremap failed!", __func__); | |
1439 | ret = -ENOMEM; | |
1440 | goto out_free_mem; | |
1441 | } | |
1442 | ||
1443 | spin_lock_init(&device_data->ctx_lock); | |
1444 | spin_lock_init(&device_data->power_state_spinlock); | |
1445 | ||
1446 | /* Enable power for CRYP hardware block */ | |
1447 | device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape"); | |
1448 | if (IS_ERR(device_data->pwr_regulator)) { | |
1449 | dev_err(dev, "[%s]: could not get cryp regulator", __func__); | |
1450 | ret = PTR_ERR(device_data->pwr_regulator); | |
1451 | device_data->pwr_regulator = NULL; | |
1452 | goto out_unmap; | |
1453 | } | |
1454 | ||
1455 | /* Enable the clk for CRYP hardware block */ | |
1456 | device_data->clk = clk_get(&pdev->dev, NULL); | |
1457 | if (IS_ERR(device_data->clk)) { | |
1458 | dev_err(dev, "[%s]: clk_get() failed!", __func__); | |
1459 | ret = PTR_ERR(device_data->clk); | |
1460 | goto out_regulator; | |
1461 | } | |
1462 | ||
1463 | /* Enable device power (and clock) */ | |
1464 | ret = cryp_enable_power(device_data->dev, device_data, false); | |
1465 | if (ret) { | |
1466 | dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); | |
1467 | goto out_clk; | |
1468 | } | |
1469 | ||
1470 | cryp_error = cryp_check(device_data); | |
1471 | if (cryp_error != 0) { | |
1472 | dev_err(dev, "[%s]: cryp_check() failed!", __func__); | |
1473 | ret = -EINVAL; | |
1474 | goto out_power; | |
1475 | } | |
1476 | ||
1477 | cryp_error = cryp_configure_protection(device_data, &prot); | |
1478 | if (cryp_error != 0) { | |
1479 | dev_err(dev, "[%s]: cryp_configure_protection() failed!", | |
1480 | __func__); | |
1481 | ret = -EINVAL; | |
1482 | goto out_power; | |
1483 | } | |
1484 | ||
1485 | res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | |
1486 | if (!res_irq) { | |
1487 | dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable", | |
1488 | __func__); | |
1489 | ret = -ENODEV; | |
1489 | goto out_power; | |
1490 | } | |
1491 | ||
1492 | ret = request_irq(res_irq->start, | |
1493 | cryp_interrupt_handler, | |
1494 | 0, | |
1495 | "cryp1", | |
1496 | device_data); | |
1497 | if (ret) { | |
1498 | dev_err(dev, "[%s]: Unable to request IRQ", __func__); | |
1499 | goto out_power; | |
1500 | } | |
1501 | ||
1502 | if (cryp_mode == CRYP_MODE_DMA) | |
1503 | cryp_dma_setup_channel(device_data, dev); | |
1504 | ||
1505 | platform_set_drvdata(pdev, device_data); | |
1506 | ||
1507 | /* Put the new device into the device list... */ | |
1508 | klist_add_tail(&device_data->list_node, &driver_data.device_list); | |
1509 | ||
1510 | /* ... and signal that a new device is available. */ | |
1511 | up(&driver_data.device_allocation); | |
1512 | ||
1513 | atomic_set(&session_id, 1); | |
1514 | ||
1515 | ret = cryp_algs_register_all(); | |
1516 | if (ret) { | |
1517 | dev_err(dev, "[%s]: cryp_algs_register_all() failed!", | |
1518 | __func__); | |
1519 | goto out_power; | |
1520 | } | |
1521 | ||
1522 | return 0; | |
1523 | ||
1524 | out_power: | |
1525 | cryp_disable_power(device_data->dev, device_data, false); | |
1526 | ||
1527 | out_clk: | |
1528 | clk_put(device_data->clk); | |
1529 | ||
1530 | out_regulator: | |
1531 | regulator_put(device_data->pwr_regulator); | |
1532 | ||
1533 | out_unmap: | |
1534 | iounmap(device_data->base); | |
1535 | ||
1536 | out_free_mem: | |
1537 | release_mem_region(res->start, resource_size(res)); | |
1538 | ||
1539 | out_kfree: | |
1540 | kfree(device_data); | |
1541 | out: | |
1542 | return ret; | |
1543 | } | |
1544 | ||
1545 | static int ux500_cryp_remove(struct platform_device *pdev) | |
1546 | { | |
1547 | struct resource *res = NULL; | |
1548 | struct resource *res_irq = NULL; | |
1549 | struct cryp_device_data *device_data; | |
1550 | ||
1551 | dev_dbg(&pdev->dev, "[%s]", __func__); | |
1552 | device_data = platform_get_drvdata(pdev); | |
1553 | if (!device_data) { | |
1554 | dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", | |
1555 | __func__); | |
1556 | return -ENOMEM; | |
1557 | } | |
1558 | ||
1559 | /* Try to decrease the number of available devices. */ | |
1560 | if (down_trylock(&driver_data.device_allocation)) | |
1561 | return -EBUSY; | |
1562 | ||
1563 | /* Check that the device is free */ | |
1564 | spin_lock(&device_data->ctx_lock); | |
1565 | /* current_ctx allocates a device, NULL = unallocated */ | |
1566 | if (device_data->current_ctx) { | |
1567 | /* The device is busy */ | |
1568 | spin_unlock(&device_data->ctx_lock); | |
1569 | /* Return the device to the pool. */ | |
1570 | up(&driver_data.device_allocation); | |
1571 | return -EBUSY; | |
1572 | } | |
1573 | ||
1574 | spin_unlock(&device_data->ctx_lock); | |
1575 | ||
1576 | /* Remove the device from the list */ | |
1577 | if (klist_node_attached(&device_data->list_node)) | |
1578 | klist_remove(&device_data->list_node); | |
1579 | ||
1580 | /* If this was the last device, remove the services */ | |
1581 | if (list_empty(&driver_data.device_list.k_list)) | |
1582 | cryp_algs_unregister_all(); | |
1583 | ||
1584 | res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | |
1585 | if (!res_irq) | |
1586 | dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", | |
1587 | __func__); | |
1588 | else { | |
1589 | disable_irq(res_irq->start); | |
1590 | free_irq(res_irq->start, device_data); | |
1591 | } | |
1592 | ||
1593 | if (cryp_disable_power(&pdev->dev, device_data, false)) | |
1594 | dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", | |
1595 | __func__); | |
1596 | ||
1597 | clk_put(device_data->clk); | |
1598 | regulator_put(device_data->pwr_regulator); | |
1599 | ||
1600 | iounmap(device_data->base); | |
1601 | ||
1602 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1603 | if (res) | |
1604 | release_mem_region(res->start, resource_size(res)); | |
1605 | ||
1606 | kfree(device_data); | |
1607 | ||
1608 | return 0; | |
1609 | } | |
1610 | ||
1611 | static void ux500_cryp_shutdown(struct platform_device *pdev) | |
1612 | { | |
1613 | struct resource *res_irq = NULL; | |
1614 | struct cryp_device_data *device_data; | |
1615 | ||
1616 | dev_dbg(&pdev->dev, "[%s]", __func__); | |
1617 | ||
1618 | device_data = platform_get_drvdata(pdev); | |
1619 | if (!device_data) { | |
1620 | dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", | |
1621 | __func__); | |
1622 | return; | |
1623 | } | |
1624 | ||
1625 | /* Check that the device is free */ | |
1626 | spin_lock(&device_data->ctx_lock); | |
1627 | /* current_ctx allocates a device, NULL = unallocated */ | |
1628 | if (!device_data->current_ctx) { | |
1629 | if (down_trylock(&driver_data.device_allocation)) | |
1630 | dev_dbg(&pdev->dev, "[%s]: Cryp still in use! " | |
1631 | "Shutting down anyway...", __func__); | |
1632 | /** | |
1633 | * (Allocate the device) | |
1634 | * Need to set this to non-null (dummy) value, | |
1635 | * to avoid usage during context switching. | |
1636 | */ | |
1637 | device_data->current_ctx++; | |
1638 | } | |
1639 | spin_unlock(&device_data->ctx_lock); | |
1640 | ||
1641 | /* Remove the device from the list */ | |
1642 | if (klist_node_attached(&device_data->list_node)) | |
1643 | klist_remove(&device_data->list_node); | |
1644 | ||
1645 | /* If this was the last device, remove the services */ | |
1646 | if (list_empty(&driver_data.device_list.k_list)) | |
1647 | cryp_algs_unregister_all(); | |
1648 | ||
1649 | res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | |
1650 | if (!res_irq) | |
1651 | dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", | |
1652 | __func__); | |
1653 | else { | |
1654 | disable_irq(res_irq->start); | |
1655 | free_irq(res_irq->start, device_data); | |
1656 | } | |
1657 | ||
1658 | if (cryp_disable_power(&pdev->dev, device_data, false)) | |
1659 | dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed", | |
1660 | __func__); | |
1661 | ||
1662 | } | |
1663 | ||
1664 | static int ux500_cryp_suspend(struct device *dev) |
1665 | { |
1666 | int ret; | |
1667 | struct platform_device *pdev = to_platform_device(dev); |
1668 | struct cryp_device_data *device_data; |
1669 | struct resource *res_irq; | |
1670 | struct cryp_ctx *temp_ctx = NULL; | |
1671 | ||
1672 | dev_dbg(dev, "[%s]", __func__); |
1673 | |
1674 | /* Handle state? */ | |
1675 | device_data = platform_get_drvdata(pdev); | |
1676 | if (!device_data) { | |
1677 | dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__); |
1678 | return -ENOMEM; |
1679 | } | |
1680 | ||
1681 | res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | |
1682 | if (!res_irq) | |
1683 | dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__); |
1684 | else |
1685 | disable_irq(res_irq->start); | |
1686 | ||
1687 | spin_lock(&device_data->ctx_lock); | |
1688 | if (!device_data->current_ctx) | |
1689 | device_data->current_ctx++; | |
1690 | spin_unlock(&device_data->ctx_lock); | |
1691 | ||
1692 | if (device_data->current_ctx == ++temp_ctx) { | |
1693 | if (down_interruptible(&driver_data.device_allocation)) | |
1694 | dev_dbg(dev, "[%s]: down_interruptible() failed", |
1695 | __func__); | |
1696 | ret = cryp_disable_power(dev, device_data, false); | |
1697 | |
1698 | } else | |
1699 | ret = cryp_disable_power(dev, device_data, true); |
1700 | |
1701 | if (ret) | |
1702 | dev_err(dev, "[%s]: cryp_disable_power() failed!", __func__); |
1703 | |
1704 | return ret; | |
1705 | } | |
1706 | ||
1707 | static int ux500_cryp_resume(struct device *dev) |
1708 | { |
1709 | int ret = 0; | |
1710 | struct platform_device *pdev = to_platform_device(dev); |
1711 | struct cryp_device_data *device_data; |
1712 | struct resource *res_irq; | |
1713 | struct cryp_ctx *temp_ctx = NULL; | |
1714 | ||
1715 | dev_dbg(dev, "[%s]", __func__); |
1716 | |
1717 | device_data = platform_get_drvdata(pdev); | |
1718 | if (!device_data) { | |
1719 | dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__); |
1720 | return -ENOMEM; |
1721 | } | |
1722 | ||
1723 | spin_lock(&device_data->ctx_lock); | |
1724 | if (device_data->current_ctx == ++temp_ctx) | |
1725 | device_data->current_ctx = NULL; | |
1726 | spin_unlock(&device_data->ctx_lock); | |
1727 | ||
1728 | ||
1729 | if (!device_data->current_ctx) | |
1730 | up(&driver_data.device_allocation); | |
1731 | else | |
1732 | ret = cryp_enable_power(dev, device_data, true); |
1733 | |
1734 | if (ret) | |
1735 | dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); |
1736 | else { |
1737 | res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | |
1738 | if (res_irq) | |
1739 | enable_irq(res_irq->start); | |
1740 | } | |
1741 | ||
1742 | return ret; | |
1743 | } | |
1744 | ||
1745 | static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume); |
1746 | ||
1747 | static struct platform_driver cryp_driver = { |
1748 | .probe = ux500_cryp_probe, | |
1749 | .remove = ux500_cryp_remove, | |
1750 | .shutdown = ux500_cryp_shutdown, | |
1751 | .driver = { |
1752 | .owner = THIS_MODULE, | |
1753 | .name = "cryp1", |
1754 | .pm = &ux500_cryp_pm, |
1755 | } |
1756 | }; | |
1757 | ||
1758 | static int __init ux500_cryp_mod_init(void) | |
1759 | { | |
1760 | pr_debug("[%s] is called!", __func__); | |
1761 | klist_init(&driver_data.device_list, NULL, NULL); | |
1762 | /* Initialize the semaphore to 0 devices (locked state) */ | |
1763 | sema_init(&driver_data.device_allocation, 0); | |
1764 | return platform_driver_register(&cryp_driver); | |
1765 | } | |
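/*
 * Each CRYP block that probes successfully does an up() on
 * driver_data.device_allocation in ux500_cryp_probe(), so after init the
 * semaphore ends up counting the number of available devices.
 */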
1766 | ||
1767 | static void __exit ux500_cryp_mod_fini(void) | |
1768 | { | |
1769 | pr_debug("[%s] is called!", __func__); | |
1770 | platform_driver_unregister(&cryp_driver); | |
1771 | return; | |
1772 | } | |
1773 | ||
1774 | module_init(ux500_cryp_mod_init); | |
1775 | module_exit(ux500_cryp_mod_fini); | |
1776 | ||
1777 | module_param(cryp_mode, int, 0); | |
1778 | ||
1779 | MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine."); | |
1780 | MODULE_ALIAS("aes-all"); | |
1781 | MODULE_ALIAS("des-all"); | |
1782 | ||
1783 | MODULE_LICENSE("GPL"); |