/*
 *
 * sep_main.c - Security Processor Driver main group of functions
 *
 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 * Contributions(c) 2009-2011 Discretix. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * CONTACTS:
 *
 * Mark Allyn mark.a.allyn@intel.com
 * Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 * CHANGES:
 *
 * 2009.06.26 Initial publish
 * 2010.09.14 Upgrade to Medfield
 * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c
 * 2011.02.22 Enable kernel crypto operation
 *
 * Please note that this driver is based on information in the Discretix
 * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
 * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
 * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
 * Overview and Integration Guide.
 */
/* #define DEBUG */
/* #define SEP_PERF_DEBUG */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
#include "sep_crypto.h"

#define CREATE_TRACE_POINTS
#include "sep_trace_events.h"

/*
 * Let's not spend cycles iterating over message
 * area contents if debugging is not enabled
 */
#ifdef DEBUG
#define sep_dump_message(sep) _sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif

/**
 * Currently, there is only one SEP device per platform;
 * in the event that future platforms have more than one SEP
 * device, this will become a linked list
 */

struct sep_device *sep_dev;

/**
 * sep_queue_status_remove - Removes transaction from status queue
 * @sep: SEP device
 * @queue_elem: pointer to the queue element to remove
 *
 * This function removes information about a transaction from the queue.
 */
void sep_queue_status_remove(struct sep_device *sep,
			     struct sep_queue_info **queue_elem)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
		current->pid);

	if (!queue_elem || !(*queue_elem)) {
		dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
			current->pid, __func__);
		return;
	}

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
	list_del(&(*queue_elem)->list);
	sep->sep_queue_num--;
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	kfree(*queue_elem);
	*queue_elem = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
		current->pid);
}

/**
 * sep_queue_status_add - Adds transaction to status queue
 * @sep: SEP device
 * @opcode: transaction opcode
 * @size: input data size
 * @pid: pid of current process
 * @name: current process name
 * @name_len: length of name (current process)
 *
 * This function adds information about a transaction that was started
 * to the status queue.
 */
struct sep_queue_info *sep_queue_status_add(
		struct sep_device *sep,
		u32 opcode,
		u32 size,
		u32 pid,
		u8 *name, size_t name_len)
{
	unsigned long lck_flags;
	struct sep_queue_info *my_elem = NULL;

	my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);

	if (!my_elem)
		return NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);

	my_elem->data.opcode = opcode;
	my_elem->data.size = size;
	my_elem->data.pid = pid;

	if (name_len > TASK_COMM_LEN)
		name_len = TASK_COMM_LEN;

	memcpy(&my_elem->data.name, name, name_len);

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	list_add_tail(&my_elem->list, &sep->sep_queue_status);
	sep->sep_queue_num++;

	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return my_elem;
}
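
/*
 * Illustrative sketch (not part of the driver): a caller would pair
 * sep_queue_status_add() with sep_queue_status_remove() around the
 * transaction it tracks. The opcode and size values below are made-up
 * placeholders.
 */
#if 0
	struct sep_queue_info *elem;

	elem = sep_queue_status_add(sep, 0x1 /* opcode */, 512 /* bytes */,
				    current->pid, (u8 *)current->comm,
				    strlen(current->comm));
	if (!elem)
		return -ENOMEM;

	/* ... perform the transaction with the SEP ... */

	sep_queue_status_remove(sep, &elem);	/* frees and NULLs elem */
#endif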

/**
 * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
 * @sep: SEP device
 * @dmatables_region: Destination pointer for the buffer
 * @dma_ctx: DMA context for the transaction
 * @table_count: Number of MLLI/DMA tables to create
 * The buffer created will not work as-is for DMA operations,
 * it needs to be copied over to the appropriate place in the
 * shared area.
 */
static int sep_allocate_dmatables_region(struct sep_device *sep,
					 void **dmatables_region,
					 struct sep_dma_context *dma_ctx,
					 const u32 table_count)
{
	const size_t new_len =
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	void *tmp_region = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
		current->pid, dma_ctx);
	dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
		current->pid, dmatables_region);

	if (!dma_ctx || !dmatables_region) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] dma context/region uninitialized\n",
			 current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
		current->pid, new_len);
	dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
		dma_ctx->dmatables_len);
	tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
	if (!tmp_region) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] no mem for dma tables region\n",
			 current->pid);
		return -ENOMEM;
	}

	/* Were there any previous tables that need to be preserved? */
	if (*dmatables_region) {
		memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
		kfree(*dmatables_region);
		*dmatables_region = NULL;
	}

	*dmatables_region = tmp_region;

	dma_ctx->dmatables_len += new_len;

	return 0;
}
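
/*
 * Illustrative sketch (not part of the driver): the region grows by a
 * fixed chunk on every call while preserving previously built tables,
 * so a multi-DCB transaction can call this once per table set and copy
 * the finished region into the shared area at the end. The helper name
 * below is hypothetical.
 */
#if 0
static int sep_example_grow_region(struct sep_device *sep,
				   struct sep_dma_context *dma_ctx)
{
	void *region = NULL;
	int error;

	/* First call: dmatables_len is 0, so this just allocates */
	error = sep_allocate_dmatables_region(sep, &region, dma_ctx, 1);
	if (error)
		return error;

	/* Second call: reallocates and copies the earlier tables over */
	error = sep_allocate_dmatables_region(sep, &region, dma_ctx, 1);
	if (error)
		return error;

	/* ... copy the finished tables into the shared area ... */
	kfree(region);
	return 0;
}
#endif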

/**
 * sep_wait_transaction - Used for synchronizing transactions
 * @sep: SEP device
 */
int sep_wait_transaction(struct sep_device *sep)
{
	int error = 0;
	DEFINE_WAIT(wait);

	if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
				  &sep->in_use_flags)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no transactions, returning\n",
			current->pid);
		goto end_function_setpid;
	}

	/*
	 * Looping needed even for exclusive waitq entries
	 * due to process wakeup latencies, previous process
	 * might have already created another transaction.
	 */
	for (;;) {
		/*
		 * Exclusive waitq entry, so that only one process is
		 * woken up from the queue at a time.
		 */
		prepare_to_wait_exclusive(&sep->event_transactions,
					  &wait,
					  TASK_INTERRUPTIBLE);
		if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
					  &sep->in_use_flags)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] no transactions, breaking\n",
				current->pid);
			break;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] transactions ongoing, sleeping\n",
			current->pid);
		schedule();
		dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

		if (signal_pending(current)) {
			dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
				current->pid);
			error = -EINTR;
			goto end_function;
		}
	}
end_function_setpid:
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process that has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	/* Only one process is able to progress here at a time */
	sep->pid_doing_transaction = current->pid;

end_function:
	finish_wait(&sep->event_transactions, &wait);

	return error;
}
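
/*
 * Illustrative sketch (not part of the driver): callers bracket work
 * with sep_wait_transaction() and sep_end_transaction_handler()
 * (defined below), so that exactly one process owns the SEP at a time
 * even though many processes may hold the device node open. The
 * example function name is hypothetical.
 */
#if 0
static int sep_example_do_transaction(struct sep_device *sep,
				      struct sep_private_data *priv)
{
	int error;

	/* Block (interruptibly) until we own the SEP */
	error = sep_wait_transaction(sep);
	if (error)
		return error;	/* -EINTR: interrupted by a signal */

	/* ... build DMA tables, send the command, poll for the reply ... */

	/* Release ownership and wake the next waiter */
	return sep_end_transaction_handler(sep, &priv->dma_ctx,
					   &priv->call_status,
					   &priv->my_queue_elem);
}
#endif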

/**
 * sep_check_transaction_owner - Checks if current process owns transaction
 * @sep: SEP device
 */
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
		current->pid,
		sep->pid_doing_transaction);

	if ((sep->pid_doing_transaction == 0) ||
	    (current->pid != sep->pid_doing_transaction)) {
		return -EACCES;
	}

	/* We own the transaction */
	return 0;
}

#ifdef DEBUG

/**
 * sep_dump_message - dump the message that is pending
 * @sep: SEP device
 *
 * This will only print a dump if DEBUG is set; it does,
 * however, follow the kernel debug print enabling
 */
static void _sep_dump_message(struct sep_device *sep)
{
	int count;

	u32 *p = sep->shared_addr;

	for (count = 0; count < 10 * 4; count += 4)
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Word %d of the message is %x\n",
			current->pid, count/4, *p++);
}

#endif

/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 *
 * Allocates sep->shared_size bytes of coherent DMA memory for the
 * shared area.
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
					      sep->shared_size,
					      &sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] shared memory dma_alloc_coherent failed\n",
			current->pid);
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
		current->pid,
		sep->shared_size, sep->shared_addr,
		(unsigned long long)sep->shared_bus);
	return 0;
}

/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
			  sep->shared_addr, sep->shared_bus);
}

#ifdef DEBUG

/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
				    dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}

#endif

/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;
	struct sep_private_data *priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);

	if (filp->f_flags & O_NONBLOCK)
		return -ENOTSUPP;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	sep = sep_dev;
	priv->device = sep;
	filp->private_data = priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
		current->pid, priv);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}

/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free DMA table for synchronic actions
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
				    struct sep_dma_context **dma_ctx)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler\n",
		current->pid);

	if (!dma_ctx || !(*dma_ctx)) {
		/* No context or context already freed */
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no DMA context or context already freed\n",
			current->pid);

		return 0;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
		current->pid,
		(*dma_ctx)->nr_dcb_creat);

	for (dcb_counter = 0;
	     dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
		dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/*
		 * Output is handled differently. If
		 * this was a secure dma into restricted memory,
		 * then we skip this step altogether as restricted
		 * memory is not available to the o/s at all.
		 */
		if (((*dma_ctx)->secure_dma == false) &&
		    (dma->out_map_array)) {

			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Free page cache for input */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Again, we do this only for non secure dma */
		if (((*dma_ctx)->secure_dma == false) &&
		    (dma->out_page_array)) {

			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))

					SetPageDirty(dma->
						out_page_array[count]);

				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/*
		 * Note that here we use in_map_num_entries because we
		 * don't have a page array; the page array is generated
		 * only in lock_user_pages, which is not called
		 * for kernel crypto, which is what the sg (scatter
		 * gather) is used for exclusively
		 */
		if (dma->src_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
				     dma->in_map_num_entries, DMA_TO_DEVICE);
			dma->src_sg = NULL;
		}

		if (dma->dst_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
				     dma->in_map_num_entries, DMA_FROM_DEVICE);
			dma->dst_sg = NULL;
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	(*dma_ctx)->nr_dcb_creat = 0;
	(*dma_ctx)->num_lli_tables_created = 0;

	kfree(*dma_ctx);
	*dma_ctx = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler end\n",
		current->pid);

	return 0;
}

/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 * @my_queue_elem: pointer to the status queue element for this transaction
 *
 * This API handles the end transaction request.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
				       struct sep_dma_context **dma_ctx,
				       struct sep_call_status *call_status,
				       struct sep_queue_info **my_queue_elem)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

	/*
	 * Extraneous transaction clearing would mess up PM
	 * device usage counters and SEP would get suspended
	 * just before we send a command to SEP in the next
	 * transaction
	 */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
			current->pid);
		return 0;
	}

	/* Update queue status */
	sep_queue_status_remove(sep, my_queue_elem);

	/* Check that all the DMA resources were freed */
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, dma_ctx);

	/* Reset call status for next transaction */
	if (call_status)
		call_status->status = 0;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* Start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_mark_last_busy(&sep->pdev->dev);
		pm_runtime_put_autosuspend(&sep->pdev->dev);
	}
#endif

	clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
	sep->pid_doing_transaction = 0;

	/* Now it's safe for next process to proceed */
	dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
		current->pid);
	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
	wake_up(&sep->event_transactions);

	return 0;
}


/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;

	dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);

	sep_end_transaction_handler(sep, dma_ctx, call_status,
				    my_queue_elem);

	kfree(filp->private_data);

	return 0;
}

/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	dma_addr_t bus_addr;
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

	/* Set the transaction busy (own the device) */
	/*
	 * Problem for multithreaded applications is that here we're
	 * possibly going to sleep while holding a write lock on
	 * current->mm->mmap_sem, which will cause a deadlock for an
	 * ongoing transaction trying to create DMA tables
	 */
	error = sep_wait_transaction(sep);
	if (error)
		/* Interrupted by signal, don't clear transaction */
		goto end_function;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/*
	 * Check that the size of the mapped range matches the size of
	 * the message shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
		current->pid, sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
			current->pid);
		error = -EAGAIN;
		goto end_function_with_error;
	}

	/* Update call status */
	set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

	goto end_function;

end_function_with_error:
	/* Clear our transaction */
	sep_end_transaction_handler(sep, NULL, call_status,
				    my_queue_elem);

end_function:
	return error;
}

/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lock_irq_flag;

	/* Am I the process that owns the transaction? */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
			current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Check if send command or send_reply were activated previously */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			  &call_status->status)) {
		dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
			 current->pid);
		mask = POLLERR;
		goto end_function;
	}


	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
		current->pid);

	poll_wait(filp, &sep->event_interrupt, wait);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
		current->pid, sep->send_ct, sep->reply_ct);

	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if ((retval2 != 0x0) && (retval2 != 0x8)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
			current->pid, retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll: data ready check (GPR2) %x\n",
			current->pid, retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP printf request\n",
				current->pid);
			goto end_function;
		}

		/* Check if this is a SEP reply or a request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP request\n",
				current->pid);
		} else {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: normal return\n",
				current->pid);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
				current->pid);
			mask |= POLLIN | POLLRDNORM;
		}
		set_bit(SEP_LEGACY_POLL_DONE_OFFSET, &call_status->status);
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll; no reply; returning mask of 0\n",
			current->pid);
		mask = 0;
	}

end_function:
	return mask;
}
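
/*
 * Illustrative user-space sketch (not part of the driver): the legacy
 * flow is to open the device node, mmap the message area, write a
 * message, kick the SEP via the send-command ioctl, then poll() until
 * POLLIN says a reply is waiting in the mapped area. The device node
 * path, MSG_AREA_SIZE and the ioctl name shown are placeholders for
 * the actual definitions in sep_driver_api.h.
 *
 *	int fd = open("/dev/sep0", O_RDWR);
 *	void *msg = mmap(NULL, MSG_AREA_SIZE, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	... build the request message in msg ...
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND);
 *	poll(&pfd, 1, -1);
 *	... read the reply back from msg ...
 */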

/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr +
		SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}

/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */


	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
		current->pid, time.tv_sec);
	dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
		current->pid, time_addr);
	dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
		current->pid, sep->shared_addr);

	return time.tv_sec;
}

/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises an interrupt to the SEP that signals that it
 * has a new command from the host
 *
 * Note that this function does fall under the ioctl lock
 */
int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lock_irq_flag;
	u32 *msg_pool;
	int error = 0;

	/* Basic sanity check; set msg pool to start of shared area */
	msg_pool = (u32 *)sep->shared_addr;
	msg_pool += 2;

	/* Look for start msg token */
	if (*msg_pool != SEP_START_MSG_TOKEN) {
		dev_warn(&sep->pdev->dev, "start message token not present\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Do we have a reasonable size? */
	msg_pool += 1;
	if ((*msg_pool < 2) ||
	    (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {

		dev_warn(&sep->pdev->dev, "invalid message size\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Does the command look reasonable? */
	msg_pool += 1;
	if (*msg_pool < 2) {
		dev_warn(&sep->pdev->dev, "invalid message opcode\n");
		error = -EPROTO;
		goto end_function;
	}

#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
		current->pid,
		sep->pdev->dev.power.runtime_status);
	sep->in_use = 1; /* device is about to be used */
	pm_runtime_get_sync(&sep->pdev->dev);
#endif

	if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep->in_use = 1; /* device is about to be used */
	sep_set_time(sep);

	sep_dump_message(sep);

	/* Update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
		current->pid, sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	return error;
}
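
/*
 * Layout assumed by the sanity checks above (a sketch; values other
 * than the token are placeholders): the message header starts at word
 * 2 of the shared area, with the start token, then the total message
 * size in bytes, then the opcode.
 */
#if 0
	u32 *msg = (u32 *)sep->shared_addr;

	msg[2] = SEP_START_MSG_TOKEN;	/* checked first */
	msg[3] = msg_size;	/* 2..SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES */
	msg[4] = opcode;	/* must be >= 2 */
	/* ... message payload follows ... */
#endif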

/**
 * sep_crypto_dma - DMA map a scatterlist
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @direction: DMA direction of the mapping
 * @dma_maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @returns number of dma maps on success; negative on error
 *
 * This creates the dma table from the scatterlist
 * It is used only for kernel crypto as it works with the scatterlist
 * representation of data buffers
 */
static int sep_crypto_dma(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **dma_maps,
	enum dma_data_direction direction)
{
	struct scatterlist *temp_sg;

	u32 count_segment;
	u32 count_mapped;
	struct sep_dma_map *sep_dma;
	int ct1;

	if (sg->length == 0)
		return 0;

	/* Count the segments */
	temp_sg = sg;
	count_segment = 0;
	while (temp_sg) {
		count_segment += 1;
		temp_sg = scatterwalk_sg_next(temp_sg);
	}
	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x segments in sg\n", count_segment);

	/* DMA map segments */
	count_mapped = dma_map_sg(&sep->pdev->dev, sg,
				  count_segment, direction);

	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x maps in sg\n", count_mapped);

	if (count_mapped == 0) {
		dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
		return -ENOMEM;
	}

	sep_dma = kmalloc(sizeof(struct sep_dma_map) *
			  count_mapped, GFP_ATOMIC);

	if (sep_dma == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
		return -ENOMEM;
	}

	for_each_sg(sg, temp_sg, count_mapped, ct1) {
		sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
		sep_dma[ct1].size = sg_dma_len(temp_sg);
		dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
			ct1, (unsigned long)sep_dma[ct1].dma_addr,
			(unsigned long)sep_dma[ct1].size);
	}

	*dma_maps = sep_dma;
	return count_mapped;

}

/**
 * sep_crypto_lli - build LLI table from a scatterlist
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @data_size: total data size
 * @direction: DMA direction of the mapping
 * @dma_maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @lli_maps: pointer to place a pointer to array of lli maps
 *	This is filled in; anything previous there will be lost
 *	The structure for lli maps is sep_lli_entry
 * @returns number of dma maps on success; negative on error
 *
 * This creates the LLI table from the scatterlist
 * It is only used for kernel crypto as it works exclusively
 * with scatterlists (struct scatterlist) representation of
 * data buffers
 */
static int sep_crypto_lli(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **maps,
	struct sep_lli_entry **llis,
	u32 data_size,
	enum dma_data_direction direction)
{

	int ct1;
	struct sep_lli_entry *sep_lli;
	struct sep_dma_map *sep_map;

	int nbr_ents;

	nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
	if (nbr_ents <= 0) {
		dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
			nbr_ents);
		return nbr_ents;
	}

	sep_map = *maps;

	sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);

	if (sep_lli == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");

		kfree(*maps);
		*maps = NULL;
		return -ENOMEM;
	}

	for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
		sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;

		/* Maximum for page is total data size */
		if (sep_map[ct1].size > data_size)
			sep_map[ct1].size = data_size;

		sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
	}

	*llis = sep_lli;
	return nbr_ents;
}

/**
 * sep_lock_kernel_pages - map kernel pages for DMA
 * @sep: pointer to struct sep_device
 * @kernel_virt_addr: address of data buffer in kernel
 * @data_size: size of data
 * @lli_array_ptr: lli array
 * @in_out_flag: input into device or output from device
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function locks all the physical pages of the kernel virtual buffer
 * and constructs a basic lli array, where each entry holds the physical
 * page address and the size that application data holds in this page
 * This function is used only during kernel crypto mod calls from within
 * the kernel (when ioctl is not used)
 *
 * This is used only for kernel crypto. Kernel pages
 * are handled differently as they are done via
 * scatter gather lists (struct scatterlist)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
				 unsigned long kernel_virt_addr,
				 u32 data_size,
				 struct sep_lli_entry **lli_array_ptr,
				 int in_out_flag,
				 struct sep_dma_context *dma_ctx)

{
	u32 num_pages;
	struct scatterlist *sg;

	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	enum dma_data_direction direction;

	lli_array = NULL;
	map_array = NULL;

	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		direction = DMA_TO_DEVICE;
		sg = dma_ctx->src_sg;
	} else {
		direction = DMA_FROM_DEVICE;
		sg = dma_ctx->dst_sg;
	}

	num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
				   data_size, direction);

	if (num_pages <= 0) {
		dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
			num_pages);
		return -ENOMEM;
	}

	/* Put mapped kernel sg into kernel resource array */

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
			NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
			dma_ctx->src_sg;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
			NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
			dma_ctx->dst_sg;
	}

	return 0;
}

/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic lli array, where each entry
 * holds the physical page address and the size that application
 * data holds in this physical page
 */
static int sep_lock_user_pages(struct sep_device *sep,
			       u32 app_virt_addr,
			       u32 data_size,
			       struct sep_lli_entry **lli_array_ptr,
			       int in_out_flag,
			       struct sep_dma_context *dma_ctx)

{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	/* Allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
	if (!map_array) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] kmalloc for map_array failed\n",
			 current->pid);
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
			    GFP_ATOMIC);

	if (!lli_array) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] kmalloc for lli_array failed\n",
			 current->pid);
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	/* Convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr,
				num_pages,
				((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
				0, page_array, NULL);

	up_read(&current->mm->mmap_sem);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] not all pages locked by get_user_pages, "
			 "result 0x%X, num_pages 0x%X\n",
			 current->pid, result, num_pages);
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
		current->pid);

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
				     0, PAGE_SIZE, DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n", current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n",
		current->pid);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);


	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n",
			current->pid);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
			page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
			num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
			page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
			map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	return error;
}
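
/*
 * Worked example of the first/last block-size adjustment above,
 * assuming 4 KiB pages: for app_virt_addr = 0x10000F00 and
 * data_size = 0x2000, the buffer spans pages 0x10000..0x10002, so
 * num_pages = 3. lli_array[0] starts at offset 0xF00 with
 * block_size = 0x1000 - 0xF00 = 0x100, lli_array[1] covers a full
 * 0x1000, and lli_array[2] gets (0x10000F00 + 0x2000) & ~PAGE_MASK
 * = 0xF00 as its block size: 0x100 + 0x1000 + 0xF00 = 0x2000.
 */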

/**
 * sep_lli_table_secure_dma - get lli array for IMR addresses
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: not used
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function creates lli tables for outputting data to
 * IMR memory, which is memory that cannot be accessed by the
 * x86 processor.
 */
static int sep_lli_table_secure_dma(struct sep_device *sep,
				    u32 app_virt_addr,
				    u32 data_size,
				    struct sep_lli_entry **lli_array_ptr,
				    int in_out_flag,
				    struct sep_dma_context *dma_ctx)

{
	int error = 0;
	u32 count;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of lli */
	struct sep_lli_entry *lli_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
		current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
			    GFP_ATOMIC);

	if (!lli_array) {
		dev_warn(&sep->pdev->dev,
			 "[PID%d] kmalloc for lli_array failed\n",
			 current->pid);
		return -ENOMEM;
	}

	/*
	 * Fill the lli_array
	 */
	start_page = start_page << PAGE_SHIFT;
	for (count = 0; count < num_pages; count++) {
		/* Fill the lli array entry */
		lli_array[count].bus_address = start_page;
		lli_array[count].block_size = PAGE_SIZE;

		start_page += PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n"
		"lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n"
			"lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid, num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}
	*lli_array_ptr = lli_array;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;

	return error;
}

/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr: pointer to the input lli array
 * @num_array_entries: number of entries in the array
 * @last_table_flag: set if this turns out to be the last table
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
	     (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
	     (counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries are left or if the data size exceeds
	 * the DMA restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than the DMA
	 * restriction. If it is - recalculate the current table size,
	 * so that the next table data size will be adequate for DMA
	 */
	if (next_table_data_size &&
	    next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
				    next_table_data_size);

end_function:
	return table_data_size;
}
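
/*
 * Worked example of the sizing logic above (values are illustrative;
 * the real constants live in sep_driver_config.h): suppose the table
 * fills to table_data_size = 0x8000 but only 0x200 bytes of entries
 * remain for the next table, while the DMA minimum
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE is 0x400. The function then
 * returns 0x8000 - (0x400 - 0x200) = 0x7E00, deliberately leaving
 * 0x400 bytes for the next table so it still meets the restriction.
 */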

/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry *lli_array_ptr,
	struct sep_lli_entry *lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
		current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr is %p\n",
			current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
			current->pid, lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
				current->pid);

			/* Update the size of block in the table */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;

}

/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the physical address inside shared area according
 * to the virtual address. It can be either on the external RAM device
 * (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
					      void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
		current->pid, virt_address);
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
		current->pid,
		(unsigned long)
		sep->shared_bus + (virt_address - sep->shared_addr));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}

/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
					 dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
		current->pid,
		(unsigned long)bus_address, (unsigned long)(sep->shared_addr +
		(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}
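
/*
 * Example of the translation pair above (illustrative values): if
 * shared_bus = 0x80000000 and shared_addr = 0xF0000000, then bus
 * address 0x80000010 maps to virtual 0xF0000010 and back again; the
 * two helpers are exact inverses over the shared area.
 */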

/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
#ifdef DEBUG
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
		current->pid);
	if (num_table_entries == 0) {
		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
			current->pid);
		return;
	}

	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli table %08lx, "
			"table_data_size is (hex) %lx\n",
			current->pid, table_count, table_data_size);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] num_table_entries is (hex) %lx\n",
			current->pid, num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
		     entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"[PID%d] lli_table_ptr address is %08lx\n",
				current->pid,
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] phys address is %08lx "
				"block size is (hex) %x\n", current->pid,
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Point to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->block_size "
			"is (hex) %x\n",
			current->pid,
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->physical_address "
			"is %08lx\n",
			current->pid,
			(unsigned long)lli_table_ptr->bus_address);


		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys table_data_size is "
			"(hex) %lx num_table_entries is"
			" %lx bus_address is %lx\n",
			current->pid,
			table_data_size,
			num_table_entries,
			(unsigned long)lli_table_ptr->bus_address);

		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
		current->pid);
#endif
}

1833
1834/**
1835 * sep_prepare_empty_lli_table - create a blank LLI table
1836 * @sep: pointer to struct sep_device
1837 * @lli_table_addr_ptr: pointer to lli table
1838 * @num_entries_ptr: pointer to number of entries
1839 * @table_data_size_ptr: pointer to table data size
1840 * @dmatables_region: Optional buffer for DMA tables
1841 * @dma_ctx: DMA context
1842 *
1843 * This function creates empty lli tables when there is no data
1844 */
1845static void sep_prepare_empty_lli_table(struct sep_device *sep,
1846 dma_addr_t *lli_table_addr_ptr,
1847 u32 *num_entries_ptr,
1848 u32 *table_data_size_ptr,
1849 void **dmatables_region,
1850 struct sep_dma_context *dma_ctx)
1851{
1852 struct sep_lli_entry *lli_table_ptr;
1853
1854 /* Find the area for new table */
1855 lli_table_ptr =
1856 (struct sep_lli_entry *)(sep->shared_addr +
1857 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1858 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1859 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1860
1861 if (dmatables_region && *dmatables_region)
1862 lli_table_ptr = *dmatables_region;
1863
1864 lli_table_ptr->bus_address = 0;
1865 lli_table_ptr->block_size = 0;
1866
1867 lli_table_ptr++;
1868 lli_table_ptr->bus_address = 0xFFFFFFFF;
1869 lli_table_ptr->block_size = 0;
1870
1871 /* Set the output parameter value */
1872 *lli_table_addr_ptr = sep->shared_bus +
1873 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1874 dma_ctx->num_lli_tables_created *
1875 sizeof(struct sep_lli_entry) *
1876 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1877
1878 /* Set the num of entries and table data size for empty table */
1879 *num_entries_ptr = 2;
1880 *table_data_size_ptr = 0;
1881
1882 /* Update the number of created tables */
1883 dma_ctx->num_lli_tables_created++;
1884}
1885
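/*
 * For reference, the empty table built above always has exactly two
 * entries: a zeroed data entry followed by the 0xFFFFFFFF terminator
 * entry. A hypothetical sanity check over that layout:
 */
static inline bool sep_empty_lli_table_ok(const struct sep_lli_entry *table)
{
	return table[0].bus_address == 0 && table[0].block_size == 0 &&
	       table[1].bus_address == 0xFFFFFFFF && table[1].block_size == 0;
}
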
1886/**
1887 * sep_prepare_input_dma_table - prepare input DMA mappings
1888 * @sep: pointer to struct sep_device
1889 * @app_virt_addr: virtual address of the input buffer
1890 * @data_size: size of the input data in bytes
1891 * @block_size: block size of the operation
1892 * @lli_table_ptr: returned bus address of the first input LLI table
1893 * @num_entries_ptr: returned number of entries in the first table
1894 * @table_data_size_ptr: returned data size of the first table
1895 * @is_kva: set for kernel data (kernel crypto call)
1896 *
1897 * This function prepares only the input DMA table for synchronous
1898 * symmetric operations (HASH)
1898 * Note that all bus addresses that are passed to the SEP
1899 * are in 32 bit format; the SEP is a 32 bit device
1900 */
1901static int sep_prepare_input_dma_table(struct sep_device *sep,
1902 unsigned long app_virt_addr,
1903 u32 data_size,
1904 u32 block_size,
1905 dma_addr_t *lli_table_ptr,
1906 u32 *num_entries_ptr,
1907 u32 *table_data_size_ptr,
1908 bool is_kva,
1909 void **dmatables_region,
1910 struct sep_dma_context *dma_ctx
1911)
1912{
1913 int error = 0;
1914 /* Pointer to the info entry of the table - the last entry */
1915 struct sep_lli_entry *info_entry_ptr;
1916 /* Array of pointers to page */
1917 struct sep_lli_entry *lli_array_ptr;
1918 /* Points to the first entry to be processed in the lli_in_array */
1919 u32 current_entry = 0;
1920 /* Num entries in the virtual buffer */
1921 u32 sep_lli_entries = 0;
1922 /* Lli table pointer */
1923 struct sep_lli_entry *in_lli_table_ptr;
1924 /* The total data in one table */
1925 u32 table_data_size = 0;
1926 /* Flag for last table */
1927 u32 last_table_flag = 0;
1928 /* Number of entries in lli table */
1929 u32 num_entries_in_table = 0;
1930 /* Next table address */
1931 void *lli_table_alloc_addr = NULL;
1932 void *dma_lli_table_alloc_addr = NULL;
1933 void *dma_in_lli_table_ptr = NULL;
1934
1935 dev_dbg(&sep->pdev->dev,
1936 "[PID%d] prepare input dma tbl data size: (hex) %x\n",
1937 current->pid, data_size);
1938
1939 dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
1940 current->pid, block_size);
1941
1942 /* Initialize the pages pointers */
1943 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
1944 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
1945
1946 /* Set the kernel address for first table to be allocated */
1947 lli_table_alloc_addr = (void *)(sep->shared_addr +
1948 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1949 dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1950 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1951
1952 if (data_size == 0) {
1953 if (dmatables_region) {
1954 error = sep_allocate_dmatables_region(sep,
1955 dmatables_region,
1956 dma_ctx,
1957 1);
1958 if (error)
1959 return error;
1960 }
1961 /* Special case - create empty table - 2 entries, zero data */
1962 sep_prepare_empty_lli_table(sep, lli_table_ptr,
1963 num_entries_ptr, table_data_size_ptr,
1964 dmatables_region, dma_ctx);
1965 goto update_dcb_counter;
1966 }
1967
1968 /* Check if the pages are in Kernel Virtual Address layout */
1969 if (is_kva == true)
1970 error = sep_lock_kernel_pages(sep, app_virt_addr,
1971 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1972 dma_ctx);
1973 else
1974 /*
1975 * Lock the pages of the user buffer
1976 * and translate them to pages
1977 */
1978 error = sep_lock_user_pages(sep, app_virt_addr,
1979 data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1980 dma_ctx);
1981
1982 if (error)
1983 goto end_function;
1984
1985 dev_dbg(&sep->pdev->dev,
1986 "[PID%d] output sep_in_num_pages is (hex) %x\n",
1987 current->pid,
1988 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
1989
1990 current_entry = 0;
1991 info_entry_ptr = NULL;
1992
1993 sep_lli_entries =
1994 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
1995
1996 dma_lli_table_alloc_addr = lli_table_alloc_addr;
1997 if (dmatables_region) {
1998 error = sep_allocate_dmatables_region(sep,
1999 dmatables_region,
2000 dma_ctx,
2001 sep_lli_entries);
2002 if (error)
2003 return error;
2004 lli_table_alloc_addr = *dmatables_region;
2005 }
2006
2007 /* Loop till all the entries in the input array are processed */
2008 while (current_entry < sep_lli_entries) {
2009
2010 /* Set the new input and output tables */
2011 in_lli_table_ptr =
2012 (struct sep_lli_entry *)lli_table_alloc_addr;
2013 dma_in_lli_table_ptr =
2014 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2015
2016 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2017 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2018 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2019 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2020
2021 if (dma_lli_table_alloc_addr >
2022 ((void *)sep->shared_addr +
2023 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2024 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2025
2026 error = -ENOMEM;
2027 goto end_function_error;
2028
2029 }
2030
2031 /* Update the number of created tables */
2032 dma_ctx->num_lli_tables_created++;
2033
2034 /* Calculate the maximum size of data for input table */
2035 table_data_size = sep_calculate_lli_table_max_size(sep,
2036 &lli_array_ptr[current_entry],
2037 (sep_lli_entries - current_entry),
2038 &last_table_flag);
2039
2040 /*
2041 * If this is not the last table -
2042 * then align it to the block size
2043 */
2044 if (!last_table_flag)
2045 table_data_size =
2046 (table_data_size / block_size) * block_size;
2047
2048 dev_dbg(&sep->pdev->dev,
2049 "[PID%d] output table_data_size is (hex) %x\n",
2050 current->pid,
2051 table_data_size);
2052
2053 /* Construct input lli table */
2054 sep_build_lli_table(sep, &lli_array_ptr[current_entry],
2055 in_lli_table_ptr,
2056 &current_entry, &num_entries_in_table, table_data_size);
2057
2058 if (info_entry_ptr == NULL) {
2059
2060 /* Set the output parameters to physical addresses */
2061 *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
2062 dma_in_lli_table_ptr);
2063 *num_entries_ptr = num_entries_in_table;
2064 *table_data_size_ptr = table_data_size;
2065
2066 dev_dbg(&sep->pdev->dev,
2067 "[PID%d] output lli_table_in_ptr is %08lx\n",
2068 current->pid,
2069 (unsigned long)*lli_table_ptr);
2070
2071 } else {
2072 /* Update the info entry of the previous in table */
2073 info_entry_ptr->bus_address =
2074 sep_shared_area_virt_to_bus(sep,
2075 dma_in_lli_table_ptr);
2076 info_entry_ptr->block_size =
2077 ((num_entries_in_table) << 24) |
2078 (table_data_size);
2079 }
2080 /* Save the pointer to the info entry of the current tables */
2081 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
2082 }
2083 /* Print input tables */
2084 if (!dmatables_region) {
2085 sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
2086 sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
2087 *num_entries_ptr, *table_data_size_ptr);
2088 }
2089
2090 /* Free the array of the pages */
2091 kfree(lli_array_ptr);
2092
2093update_dcb_counter:
2094 /* Update DCB counter */
2095 dma_ctx->nr_dcb_creat++;
2096 goto end_function;
2097
2098end_function_error:
2099 /* Free all the allocated resources */
2100 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2101 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2102 kfree(lli_array_ptr);
2103 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2104 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2105
2106end_function:
2107 return error;
2108
2109}
2110
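/*
 * Illustrative caller sketch, assuming a user-space buffer (is_kva ==
 * false) and no separately allocated dmatables region: prepare an
 * input-only table set as done for HASH operations. The wrapper and its
 * parameter names are hypothetical; error handling is left minimal.
 */
static int sep_prepare_hash_input_example(struct sep_device *sep,
					  unsigned long user_buf, u32 size,
					  u32 block_size,
					  struct sep_dma_context *dma_ctx)
{
	dma_addr_t first_table_bus = 0;
	u32 first_num_entries = 0;
	u32 first_data_size = 0;

	return sep_prepare_input_dma_table(sep, user_buf, size, block_size,
					   &first_table_bus,
					   &first_num_entries,
					   &first_data_size,
					   false, NULL, dma_ctx);
}
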
2111/**
2112 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2113 * @sep: pointer to struct sep_device
2114 * @lli_in_array: LLI array describing the input buffer
2115 * @sep_in_lli_entries: number of entries in the input LLI array
2116 * @lli_out_array: LLI array describing the output buffer
2117 * @sep_out_lli_entries: number of entries in the output LLI array
2118 * @block_size: block size of the operation
2119 * @lli_table_in_ptr: returned bus address of the first input table
2120 * @lli_table_out_ptr: returned bus address of the first output table
2121 * @in_num_entries_ptr: returned number of entries in the first input table
2122 * @out_num_entries_ptr: returned number of entries in the first output table
2123 * @table_data_size_ptr: returned data size of the first tables
2124 *
2125 * This function creates the input and output DMA tables for
2126 * symmetric operations (AES/DES) according to the block
2127 * size from LLI arrays
2128 * Note that all bus addresses that are passed to the SEP
2129 * are in 32 bit format; the SEP is a 32 bit device
2130 */
2131static int sep_construct_dma_tables_from_lli(
2132 struct sep_device *sep,
2133 struct sep_lli_entry *lli_in_array,
2134 u32 sep_in_lli_entries,
2135 struct sep_lli_entry *lli_out_array,
2136 u32 sep_out_lli_entries,
2137 u32 block_size,
2138 dma_addr_t *lli_table_in_ptr,
2139 dma_addr_t *lli_table_out_ptr,
2140 u32 *in_num_entries_ptr,
2141 u32 *out_num_entries_ptr,
2142 u32 *table_data_size_ptr,
2143 void **dmatables_region,
2144 struct sep_dma_context *dma_ctx)
2145{
2146 /* Points to the area where next lli table can be allocated */
2147 void *lli_table_alloc_addr = NULL;
2148 /*
2149 * Points to the area in shared region where next lli table
2150 * can be allocated
2151 */
2152 void *dma_lli_table_alloc_addr = NULL;
2153 /* Input lli table in dmatables_region or shared region */
2154 struct sep_lli_entry *in_lli_table_ptr = NULL;
2155 /* Input lli table location in the shared region */
2156 struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2157 /* Output lli table in dmatables_region or shared region */
2158 struct sep_lli_entry *out_lli_table_ptr = NULL;
2159 /* Output lli table location in the shared region */
2160 struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2161 /* Pointer to the info entry of the table - the last entry */
2162 struct sep_lli_entry *info_in_entry_ptr = NULL;
2163 /* Pointer to the info entry of the table - the last entry */
2164 struct sep_lli_entry *info_out_entry_ptr = NULL;
2165 /* Points to the first entry to be processed in the lli_in_array */
2166 u32 current_in_entry = 0;
2167 /* Points to the first entry to be processed in the lli_out_array */
2168 u32 current_out_entry = 0;
2169 /* Max size of the input table */
2170 u32 in_table_data_size = 0;
2171 /* Max size of the output table */
2172 u32 out_table_data_size = 0;
2173 /* Flag that signifies if this is the last table build */
2174 u32 last_table_flag = 0;
2175 /* The data size that should be in table */
2176 u32 table_data_size = 0;
2177 /* Number of entries in the input table */
2178 u32 num_entries_in_table = 0;
2179 /* Number of entries in the output table */
2180 u32 num_entries_out_table = 0;
2181
2182 if (!dma_ctx) {
2183 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2184 return -EINVAL;
2185 }
2186
2187 /* Initialize to point after the message area */
2188 lli_table_alloc_addr = (void *)(sep->shared_addr +
2189 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2190 (dma_ctx->num_lli_tables_created *
2191 (sizeof(struct sep_lli_entry) *
2192 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2193 dma_lli_table_alloc_addr = lli_table_alloc_addr;
2194
2195 if (dmatables_region) {
2196 /* 2 for both in+out table */
2197 if (sep_allocate_dmatables_region(sep,
2198 dmatables_region,
2199 dma_ctx,
2200 2*sep_in_lli_entries))
2201 return -ENOMEM;
2202 lli_table_alloc_addr = *dmatables_region;
2203 }
2204
2205 /* Loop till all the entries in the input array are processed */
2206 while (current_in_entry < sep_in_lli_entries) {
2207 /* Set the new input and output tables */
2208 in_lli_table_ptr =
2209 (struct sep_lli_entry *)lli_table_alloc_addr;
2210 dma_in_lli_table_ptr =
2211 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2212
2213 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2214 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2215 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2216 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2217
2218 /* Set the first output tables */
2219 out_lli_table_ptr =
2220 (struct sep_lli_entry *)lli_table_alloc_addr;
2221 dma_out_lli_table_ptr =
2222 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2223
2224 /* Check if the DMA table area limit was overrun */
2225 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2226 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2227 ((void *)sep->shared_addr +
2228 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2229 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2230
2231 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2232 return -ENOMEM;
2233 }
2234
2235 /* Update the number of the lli tables created */
2236 dma_ctx->num_lli_tables_created += 2;
2237
2238 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2239 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2240 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2241 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2242
2243 /* Calculate the maximum size of data for input table */
2244 in_table_data_size =
2245 sep_calculate_lli_table_max_size(sep,
2246 &lli_in_array[current_in_entry],
2247 (sep_in_lli_entries - current_in_entry),
2248 &last_table_flag);
2249
2250 /* Calculate the maximum size of data for output table */
2251 out_table_data_size =
2252 sep_calculate_lli_table_max_size(sep,
2253 &lli_out_array[current_out_entry],
2254 (sep_out_lli_entries - current_out_entry),
2255 &last_table_flag);
2256
2257 if (!last_table_flag) {
2258 in_table_data_size = (in_table_data_size /
2259 block_size) * block_size;
2260 out_table_data_size = (out_table_data_size /
2261 block_size) * block_size;
2262 }
2263
2264 table_data_size = in_table_data_size;
2265 if (table_data_size > out_table_data_size)
2266 table_data_size = out_table_data_size;
2267
2268 dev_dbg(&sep->pdev->dev,
2269 "[PID%d] construct tables from lli"
2270 " in_table_data_size is (hex) %x\n", current->pid,
2271 in_table_data_size);
2272
2273 dev_dbg(&sep->pdev->dev,
2274 "[PID%d] construct tables from lli"
2275 " out_table_data_size is (hex) %x\n", current->pid,
2276 out_table_data_size);
2277
2278 /* Construct input lli table */
2279 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2280 in_lli_table_ptr,
2281 &current_in_entry,
2282 &num_entries_in_table,
2283 table_data_size);
2284
2285 /* Construct output lli table */
2286 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2287 out_lli_table_ptr,
2288 &current_out_entry,
2289 &num_entries_out_table,
2290 table_data_size);
2291
2292 /* If info entry is null - this is the first table built */
2293 if (info_in_entry_ptr == NULL) {
2294 /* Set the output parameters to physical addresses */
2295 *lli_table_in_ptr =
2296 sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2297
2298 *in_num_entries_ptr = num_entries_in_table;
2299
2300 *lli_table_out_ptr =
2301 sep_shared_area_virt_to_bus(sep,
2302 dma_out_lli_table_ptr);
2303
2304 *out_num_entries_ptr = num_entries_out_table;
2305 *table_data_size_ptr = table_data_size;
2306
2307 dev_dbg(&sep->pdev->dev,
2308 "[PID%d] output lli_table_in_ptr is %08lx\n",
2309 current->pid,
2310 (unsigned long)*lli_table_in_ptr);
2311 dev_dbg(&sep->pdev->dev,
2312 "[PID%d] output lli_table_out_ptr is %08lx\n",
2313 current->pid,
2314 (unsigned long)*lli_table_out_ptr);
2315 } else {
2316 /* Update the info entry of the previous in table */
2317 info_in_entry_ptr->bus_address =
2318 sep_shared_area_virt_to_bus(sep,
2319 dma_in_lli_table_ptr);
2320
2321 info_in_entry_ptr->block_size =
2322 ((num_entries_in_table) << 24) |
2323 (table_data_size);
2324
2325 /* Update the info entry of the previous out table */
2326 info_out_entry_ptr->bus_address =
2327 sep_shared_area_virt_to_bus(sep,
2328 dma_out_lli_table_ptr);
2329
2330 info_out_entry_ptr->block_size =
2331 ((num_entries_out_table) << 24) |
2332 (table_data_size);
2333
2334 dev_dbg(&sep->pdev->dev,
2335 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2336 current->pid,
2337 (unsigned long)info_in_entry_ptr->bus_address,
2338 info_in_entry_ptr->block_size);
2339
2340 dev_dbg(&sep->pdev->dev,
2341 "[PID%d] output lli_table_out_ptr:"
2342 "%08lx %08x\n",
2343 current->pid,
2344 (unsigned long)info_out_entry_ptr->bus_address,
2345 info_out_entry_ptr->block_size);
2346 }
2347
2348 /* Save the pointer to the info entry of the current tables */
2349 info_in_entry_ptr = in_lli_table_ptr +
2350 num_entries_in_table - 1;
2351 info_out_entry_ptr = out_lli_table_ptr +
2352 num_entries_out_table - 1;
2353
2354 dev_dbg(&sep->pdev->dev,
2355 "[PID%d] output num_entries_out_table is %x\n",
2356 current->pid,
2357 (u32)num_entries_out_table);
2358 dev_dbg(&sep->pdev->dev,
2359 "[PID%d] output info_in_entry_ptr is %lx\n",
2360 current->pid,
2361 (unsigned long)info_in_entry_ptr);
2362 dev_dbg(&sep->pdev->dev,
2363 "[PID%d] output info_out_entry_ptr is %lx\n",
2364 current->pid,
2365 (unsigned long)info_out_entry_ptr);
2366 }
2367
2368 /* Print input tables */
2369 if (!dmatables_region) {
2370 sep_debug_print_lli_tables(
2371 sep,
2372 (struct sep_lli_entry *)
2373 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2374 *in_num_entries_ptr,
2375 *table_data_size_ptr);
2376 }
2377
2378 /* Print output tables */
2379 if (!dmatables_region) {
2380 sep_debug_print_lli_tables(
2381 sep,
2382 (struct sep_lli_entry *)
2383 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2384 *out_num_entries_ptr,
2385 *table_data_size_ptr);
2386 }
2387
2388 return 0;
2389}
2390
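/*
 * Worked example of the sizing rule applied above: for every table but
 * the last, the table data size is rounded down to a whole number of
 * blocks, and the size actually used is the minimum of the input and
 * output sizes. Hypothetical helper for illustration only.
 */
static inline u32 sep_common_table_size(u32 in_size, u32 out_size,
					u32 block_size, bool last_table)
{
	if (!last_table) {
		in_size = (in_size / block_size) * block_size;
		out_size = (out_size / block_size) * block_size;
	}
	return in_size < out_size ? in_size : out_size;
}
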
2391/**
2392 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2393 * @app_virt_in_addr: virtual address of the input buffer
2394 * @app_virt_out_addr: virtual address of the output buffer
2395 * @data_size: size of the data in bytes
2396 * @block_size: block size of the operation
2397 * @lli_table_in_ptr: returned bus address of the first input table
2398 * @lli_table_out_ptr: returned bus address of the first output table
2399 * @in_num_entries_ptr: returned number of entries in the first input table
2400 * @out_num_entries_ptr: returned number of entries in the first output table
2401 * @table_data_size_ptr: returned data size of the first tables
2402 * @is_kva: set for kernel data; used only for kernel crypto module
2403 *
2404 * This function builds input and output DMA tables for synchronous
2405 * symmetric operations (AES, DES, HASH). It also checks that each table
2406 * holds a whole multiple of the block size
2407 * Note that all bus addresses that are passed to the SEP
2408 * are in 32 bit format; the SEP is a 32 bit device
2409 */
2410static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2411 unsigned long app_virt_in_addr,
2412 unsigned long app_virt_out_addr,
2413 u32 data_size,
2414 u32 block_size,
2415 dma_addr_t *lli_table_in_ptr,
2416 dma_addr_t *lli_table_out_ptr,
2417 u32 *in_num_entries_ptr,
2418 u32 *out_num_entries_ptr,
2419 u32 *table_data_size_ptr,
2420 bool is_kva,
2421 void **dmatables_region,
2422 struct sep_dma_context *dma_ctx)
2423
2424{
2425 int error = 0;
2426 /* Array of pointers of page */
2427 struct sep_lli_entry *lli_in_array;
2428 /* Array of pointers of page */
2429 struct sep_lli_entry *lli_out_array;
2430
2431 if (!dma_ctx) {
2432 error = -EINVAL;
2433 goto end_function;
2434 }
2435
2436 if (data_size == 0) {
2437 /* Prepare empty table for input and output */
2438 if (dmatables_region) {
2439 error = sep_allocate_dmatables_region(
2440 sep,
2441 dmatables_region,
2442 dma_ctx,
2443 2);
2444 if (error)
2445 goto end_function;
2446 }
2447 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2448 in_num_entries_ptr, table_data_size_ptr,
2449 dmatables_region, dma_ctx);
2450
2451 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2452 out_num_entries_ptr, table_data_size_ptr,
2453 dmatables_region, dma_ctx);
2454
2455 goto update_dcb_counter;
2456 }
2457
2458 /* Initialize the pages pointers */
2459 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2460 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2461
2462 /* Lock the pages of the buffer and translate them to pages */
2463 if (is_kva == true) {
2464 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2465 current->pid);
2466 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2467 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2468 dma_ctx);
2469 if (error) {
2470 dev_warn(&sep->pdev->dev,
2471 "[PID%d] sep_lock_kernel_pages for input "
2472 "virtual buffer failed\n", current->pid);
2473
2474 goto end_function;
2475 }
2476
2477 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2478 current->pid);
2479 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2480 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2481 dma_ctx);
2482
2483 if (error) {
2484 dev_warn(&sep->pdev->dev,
2485 "[PID%d] sep_lock_kernel_pages for output "
2486 "virtual buffer failed\n", current->pid);
2487
2488 goto end_function_free_lli_in;
2489 }
2490
2491 } else {
2494 dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2495 current->pid);
2496 error = sep_lock_user_pages(sep, app_virt_in_addr,
2497 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2498 dma_ctx);
2499 if (error) {
2500 dev_warn(&sep->pdev->dev,
2501 "[PID%d] sep_lock_user_pages for input "
2502 "virtual buffer failed\n", current->pid);
2503
2504 goto end_function;
2505 }
2506
2507 if (dma_ctx->secure_dma == true) {
2508 /* secure_dma requires use of non accessible memory */
2509 dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2510 current->pid);
2511 error = sep_lli_table_secure_dma(sep,
2512 app_virt_out_addr, data_size, &lli_out_array,
2513 SEP_DRIVER_OUT_FLAG, dma_ctx);
2514 if (error) {
2515 dev_warn(&sep->pdev->dev,
2516 "[PID%d] secure dma table setup "
2517 "for output virtual buffer failed\n",
2518 current->pid);
2519
2520 goto end_function_free_lli_in;
2521 }
2522 } else {
2523 /* For normal, non-secure dma */
2524 dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2525 current->pid);
2526
2527 dev_dbg(&sep->pdev->dev,
2528 "[PID%d] Locking user output pages\n",
2529 current->pid);
2530
2531 error = sep_lock_user_pages(sep, app_virt_out_addr,
2532 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2533 dma_ctx);
2534
2535 if (error) {
2536 dev_warn(&sep->pdev->dev,
2537 "[PID%d] sep_lock_user_pages"
2538 " for output virtual buffer failed\n",
2539 current->pid);
2540
2541 goto end_function_free_lli_in;
2542 }
2543 }
2544 }
2545
2546 dev_dbg(&sep->pdev->dev,
2547 "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2548 current->pid,
2549 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2550
2551 dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2552 current->pid,
2553 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2554
2555 dev_dbg(&sep->pdev->dev,
2556 "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2557 current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2558
2559 /* Call the function that creates tables from the lli arrays */
2560 dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2561 current->pid);
2562 error = sep_construct_dma_tables_from_lli(
2563 sep, lli_in_array,
2564 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2565 in_num_pages,
2566 lli_out_array,
2567 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2568 out_num_pages,
2569 block_size, lli_table_in_ptr, lli_table_out_ptr,
2570 in_num_entries_ptr, out_num_entries_ptr,
2571 table_data_size_ptr, dmatables_region, dma_ctx);
2572
2573 if (error) {
2574 dev_warn(&sep->pdev->dev,
2575 "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2576 current->pid);
2577 goto end_function_with_error;
2578 }
2579
2580 kfree(lli_out_array);
2581 kfree(lli_in_array);
2582
2583update_dcb_counter:
2584 /* Update DCB counter */
2585 dma_ctx->nr_dcb_creat++;
2586
2587 goto end_function;
2588
2589end_function_with_error:
2590 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2591 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2592 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2593 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2594 kfree(lli_out_array);
2595
2596
2597end_function_free_lli_in:
2598 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2599 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2600 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2601 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2602 kfree(lli_in_array);
2603
2604end_function:
2605
2606 return error;
2607
2608}
2609
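/*
 * Illustrative caller sketch for the input/output path, assuming
 * user-space buffers, non-secure DMA and the shared-area tables (no
 * private dmatables region). AES_BLOCK_SIZE from <crypto/aes.h> is used
 * as a plausible block size; the wrapper itself is hypothetical.
 */
static int sep_prepare_aes_tables_example(struct sep_device *sep,
					  unsigned long in_buf,
					  unsigned long out_buf, u32 size,
					  struct sep_dma_context *dma_ctx)
{
	dma_addr_t in_table = 0, out_table = 0;
	u32 in_entries = 0, out_entries = 0, table_size = 0;

	return sep_prepare_input_output_dma_table(sep, in_buf, out_buf,
						  size, AES_BLOCK_SIZE,
						  &in_table, &out_table,
						  &in_entries, &out_entries,
						  &table_size, false,
						  NULL, dma_ctx);
}
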
2610/**
2611 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2612 * @app_in_address: unsigned long; for data buffer in (user space)
2613 * @app_out_address: unsigned long; for data buffer out (user space)
2614 * @data_in_size: u32; for size of data
2615 * @block_size: u32; for block size
2616 * @tail_block_size: u32; for size of tail block
2617 * @isapplet: bool; to indicate external app
2618 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2619 * @secure_dma: indicates whether this is secure_dma using IMR
2620 *
2621 * This function prepares the linked DMA tables and puts the
2622 * address for the linked list of tables into a DCB (data control
2623 * block) the address of which is known by the SEP hardware
2624 * Note that all bus addresses that are passed to the SEP
2625 * are in 32 bit format; the SEP is a 32 bit device
2626 */
2627int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2628 unsigned long app_in_address,
2629 unsigned long app_out_address,
2630 u32 data_in_size,
2631 u32 block_size,
2632 u32 tail_block_size,
2633 bool isapplet,
2634 bool is_kva,
2635 bool secure_dma,
2636 struct sep_dcblock *dcb_region,
2637 void **dmatables_region,
2638 struct sep_dma_context **dma_ctx,
2639 struct scatterlist *src_sg,
2640 struct scatterlist *dst_sg)
2641{
2642 int error = 0;
2643 /* Size of tail */
2644 u32 tail_size = 0;
2645 /* Address of the created DCB table */
2646 struct sep_dcblock *dcb_table_ptr = NULL;
2647 /* The physical address of the first input DMA table */
2648 dma_addr_t in_first_mlli_address = 0;
2649 /* Number of entries in the first input DMA table */
2650 u32 in_first_num_entries = 0;
2651 /* The physical address of the first output DMA table */
2652 dma_addr_t out_first_mlli_address = 0;
2653 /* Number of entries in the first output DMA table */
2654 u32 out_first_num_entries = 0;
2655 /* Data in the first input/output table */
2656 u32 first_data_size = 0;
2657
2658 dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2659 current->pid, app_in_address);
2660
2661 dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2662 current->pid, app_out_address);
2663
2664 dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2665 current->pid, data_in_size);
2666
2667 dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2668 current->pid, block_size);
2669
2670 dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2671 current->pid, tail_block_size);
2672
2673 dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2674 current->pid, isapplet);
2675
2676 dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2677 current->pid, is_kva);
2678
2679 dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2680 current->pid, src_sg);
2681
2682 dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2683 current->pid, dst_sg);
2684
2685 if (!dma_ctx) {
2686 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2687 current->pid);
2688 error = -EINVAL;
2689 goto end_function;
2690 }
2691
2692 if (*dma_ctx) {
2693 /* In case there are multiple DCBs for this transaction */
2694 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2695 current->pid);
2696 } else {
2697 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2698 if (!(*dma_ctx)) {
2699 dev_dbg(&sep->pdev->dev,
2700 "[PID%d] Not enough memory for DMA context\n",
2701 current->pid);
2702 error = -ENOMEM;
2703 goto end_function;
2704 }
2705 dev_dbg(&sep->pdev->dev,
2706 "[PID%d] Created DMA context addr at 0x%p\n",
2707 current->pid, *dma_ctx);
2708 }
2709
2710 (*dma_ctx)->secure_dma = secure_dma;
2711
2712 /* these are for kernel crypto only */
2713 (*dma_ctx)->src_sg = src_sg;
2714 (*dma_ctx)->dst_sg = dst_sg;
2715
2716 if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2717 /* No more DCBs to allocate */
2718 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2719 current->pid);
2720 error = -ENOSPC;
2721 goto end_function_error;
2722 }
2723
2724 /* Allocate new DCB */
2725 if (dcb_region) {
2726 dcb_table_ptr = dcb_region;
2727 } else {
2728 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2729 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2730 ((*dma_ctx)->nr_dcb_creat *
2731 sizeof(struct sep_dcblock)));
2732 }
2733
2734 /* Set the default values in the DCB */
2735 dcb_table_ptr->input_mlli_address = 0;
2736 dcb_table_ptr->input_mlli_num_entries = 0;
2737 dcb_table_ptr->input_mlli_data_size = 0;
2738 dcb_table_ptr->output_mlli_address = 0;
2739 dcb_table_ptr->output_mlli_num_entries = 0;
2740 dcb_table_ptr->output_mlli_data_size = 0;
2741 dcb_table_ptr->tail_data_size = 0;
2742 dcb_table_ptr->out_vr_tail_pt = 0;
2743
2744 if (isapplet == true) {
2745
2746 /* Check if there is enough data for DMA operation */
2747 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2748 if (is_kva == true) {
2749 error = -ENODEV;
2750 goto end_function_error;
2751 } else {
2752 if (copy_from_user(dcb_table_ptr->tail_data,
2753 (void __user *)app_in_address,
2754 data_in_size)) {
2755 error = -EFAULT;
2756 goto end_function_error;
2757 }
2758 }
2759
2760 dcb_table_ptr->tail_data_size = data_in_size;
2761
2762 /* Set the output user-space address for mem2mem op */
2763 if (app_out_address)
2764 dcb_table_ptr->out_vr_tail_pt =
2765 (aligned_u64)app_out_address;
2766
2767 /*
2768 * Update both data length parameters in order to avoid
2769 * second data copy and allow building of empty mlli
2770 * tables
2771 */
2772 tail_size = 0x0;
2773 data_in_size = 0x0;
2774
2775 } else {
2776 if (!app_out_address) {
2777 tail_size = data_in_size % block_size;
2778 if (!tail_size) {
2779 if (tail_block_size == block_size)
2780 tail_size = block_size;
2781 }
2782 } else {
2783 tail_size = 0;
2784 }
2785 }
2786 if (tail_size) {
2787 if (tail_size > sizeof(dcb_table_ptr->tail_data))
2788 return -EINVAL;
2789 if (is_kva == true) {
2790 error = -ENODEV;
2791 goto end_function_error;
2792 } else {
2793 /* We have tail data - copy it to DCB */
2794 if (copy_from_user(dcb_table_ptr->tail_data,
2795 (void __user *)(app_in_address +
2796 data_in_size - tail_size), tail_size)) {
2797 error = -EFAULT;
2798 goto end_function_error;
2799 }
2800 }
2801 if (app_out_address)
2802 /*
2803 * Calculate the output address
2804 * according to tail data size
2805 */
2806 dcb_table_ptr->out_vr_tail_pt =
2807 (aligned_u64)app_out_address +
2808 data_in_size - tail_size;
2809
2810 /* Save the real tail data size */
2811 dcb_table_ptr->tail_data_size = tail_size;
2812 /*
2813 * Update the data size without the tail
2814 * data size AKA data for the dma
2815 */
2816 data_in_size = (data_in_size - tail_size);
2817 }
2818 }
2819 /* Check if we need to build only input table or input/output */
2820 if (app_out_address) {
2821 /* Prepare input/output tables */
2822 error = sep_prepare_input_output_dma_table(sep,
2823 app_in_address,
2824 app_out_address,
2825 data_in_size,
2826 block_size,
2827 &in_first_mlli_address,
2828 &out_first_mlli_address,
2829 &in_first_num_entries,
2830 &out_first_num_entries,
2831 &first_data_size,
2832 is_kva,
2833 dmatables_region,
2834 *dma_ctx);
2835 } else {
2836 /* Prepare input tables */
2837 error = sep_prepare_input_dma_table(sep,
2838 app_in_address,
2839 data_in_size,
2840 block_size,
2841 &in_first_mlli_address,
2842 &in_first_num_entries,
2843 &first_data_size,
2844 is_kva,
2845 dmatables_region,
2846 *dma_ctx);
2847 }
2848
2849 if (error) {
2850 dev_warn(&sep->pdev->dev,
2851 "prepare DMA table call failed "
2852 "from prepare DCB call\n");
2853 goto end_function_error;
2854 }
2855
2856 /* Set the DCB values */
2857 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2858 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2859 dcb_table_ptr->input_mlli_data_size = first_data_size;
2860 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2861 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2862 dcb_table_ptr->output_mlli_data_size = first_data_size;
2863
2864 goto end_function;
2865
2866end_function_error:
2867 kfree(*dma_ctx);
aca58ec8 2868 *dma_ctx = NULL;
ff3d9c3c
MA
2869
2870end_function:
2871 return error;
2872
2873}
2874
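/*
 * Worked example of the tail-size rule above (applet path without an
 * output buffer): the tail is the remainder of the data modulo the block
 * size, except that a full trailing block counts as tail when
 * tail_block_size equals block_size (e.g. 64 bytes of data with 16-byte
 * blocks yields a 16-byte tail). Hypothetical helper, shown only to make
 * the rule concrete.
 */
static inline u32 sep_applet_tail_size(u32 data_in_size, u32 block_size,
				       u32 tail_block_size)
{
	u32 tail = data_in_size % block_size;

	if (!tail && tail_block_size == block_size)
		tail = block_size;
	return tail;
}
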
2875
2876/**
2877 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2878 * @sep: pointer to struct sep_device
2879 * @isapplet: indicates external application (used for kernel access)
2880 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2881 *
2882 * This function frees the DMA tables and DCB
2883 */
2884static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2885 bool is_kva, struct sep_dma_context **dma_ctx)
2886{
2887 struct sep_dcblock *dcb_table_ptr;
2888 unsigned long pt_hold;
2889 void *tail_pt;
2890
2891 int i = 0;
2892 int error = 0;
2893 int error_temp = 0;
2894
2895 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2896 current->pid);
2897
2898 if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
2899 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2900 current->pid);
2901
2902 /* Tail stuff is only for non secure_dma */
2903 /* Set pointer to first DCB table */
2904 dcb_table_ptr = (struct sep_dcblock *)
2905 (sep->shared_addr +
2906 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2907
2908 /*
2909 * Go over each DCB and see if
2910 * tail pointer must be updated
2911 */
2912 for (i = 0; dma_ctx && *dma_ctx &&
2913 i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2914 if (dcb_table_ptr->out_vr_tail_pt) {
2915 pt_hold = (unsigned long)dcb_table_ptr->
2916 out_vr_tail_pt;
2917 tail_pt = (void *)pt_hold;
2918 if (is_kva == true) {
2919 error = -ENODEV;
2920 break;
2921 } else {
2922 error_temp = copy_to_user(
2923 (void __user *)tail_pt,
2924 dcb_table_ptr->tail_data,
2925 dcb_table_ptr->tail_data_size);
2926 }
2927 if (error_temp) {
2928 /* Release the DMA resource */
2929 error = -EFAULT;
2930 break;
2931 }
2932 }
2933 }
2934 }
2935
2936 /* Free the output pages, if any */
2937 sep_free_dma_table_data_handler(sep, dma_ctx);
2938
2939 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2940 current->pid);
2941
2942 return error;
2943}
2944
2945/**
2946 * sep_prepare_dcb_handler - prepare a control block
2947 * @sep: pointer to struct sep_device
2948 * @arg: pointer to user parameters
2949 * @secure_dma: indicate whether we are using secure_dma on IMR
2950 *
2951 * This function will retrieve the RAR buffer physical addresses, type
2952 * & size corresponding to the RAR handles provided in the buffers vector.
2953 */
2954static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2955 bool secure_dma,
2956 struct sep_dma_context **dma_ctx)
2957{
2958 int error;
2959 /* Command arguments */
2960 static struct build_dcb_struct command_args;
2961
2962 /* Get the command arguments */
2963 if (copy_from_user(&command_args, (void __user *)arg,
2964 sizeof(struct build_dcb_struct))) {
2965 error = -EFAULT;
2966 goto end_function;
2967 }
2968
2969 dev_dbg(&sep->pdev->dev,
2970 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2971 current->pid, command_args.app_in_address);
2972 dev_dbg(&sep->pdev->dev,
2973 "[PID%d] app_out_address is %08llx\n",
2974 current->pid, command_args.app_out_address);
2975 dev_dbg(&sep->pdev->dev,
2976 "[PID%d] data_size is %x\n",
2977 current->pid, command_args.data_in_size);
2978 dev_dbg(&sep->pdev->dev,
2979 "[PID%d] block_size is %x\n",
2980 current->pid, command_args.block_size);
2981 dev_dbg(&sep->pdev->dev,
2982 "[PID%d] tail block_size is %x\n",
2983 current->pid, command_args.tail_block_size);
2984 dev_dbg(&sep->pdev->dev,
2985 "[PID%d] is_applet is %x\n",
2986 current->pid, command_args.is_applet);
2987
2988 if (!command_args.app_in_address) {
2989 dev_warn(&sep->pdev->dev,
2990 "[PID%d] null app_in_address\n", current->pid);
2991 error = -EINVAL;
2992 goto end_function;
2993 }
2994
2995 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2996 (unsigned long)command_args.app_in_address,
2997 (unsigned long)command_args.app_out_address,
2998 command_args.data_in_size, command_args.block_size,
2999 command_args.tail_block_size,
3000 command_args.is_applet, false,
3001 secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
3002
3003end_function:
3004 return error;
3005
3006}
3007
3008/**
3009 * sep_free_dcb_handler - free control block resources
3010 * @sep: pointer to struct sep_device
3011 *
3012 * This function frees the DCB resources and updates the needed
3013 * user-space buffers.
3014 */
3015static int sep_free_dcb_handler(struct sep_device *sep,
3016 struct sep_dma_context **dma_ctx)
3017{
3018 if (!dma_ctx || !(*dma_ctx)) {
3019 dev_dbg(&sep->pdev->dev,
3020 "[PID%d] no dma context defined, nothing to free\n",
3021 current->pid);
3022 return -EINVAL;
3023 }
3024
3025 dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3026 current->pid,
3027 (*dma_ctx)->nr_dcb_creat);
3028
3029 return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
3030}
3031
3032/**
3033 * sep_ioctl - ioctl handler for sep device
3034 * @filp: pointer to struct file
3035 * @cmd: command
3036 * @arg: pointer to argument structure
3037 *
3038 * Implement the ioctl methods available on the SEP device.
3039 */
3040static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3041{
3042 struct sep_private_data * const private_data = filp->private_data;
3043 struct sep_call_status *call_status = &private_data->call_status;
3044 struct sep_device *sep = private_data->device;
3045 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3046 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3047 int error = 0;
3048
3049 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
3050 current->pid, cmd);
3051 dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
3052 current->pid, *dma_ctx);
3053
3054 /* Make sure we own this device */
3055 error = sep_check_transaction_owner(sep);
3056 if (error) {
3057 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
3058 current->pid);
3059 goto end_function;
3060 }
3061
3062 /* Check that sep_mmap has been called before */
3063 if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
3064 &call_status->status)) {
3065 dev_dbg(&sep->pdev->dev,
3066 "[PID%d] mmap not called\n", current->pid);
3067 error = -EPROTO;
3068 goto end_function;
3069 }
3070
3071 /* Check that the command is for SEP device */
3072 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3073 error = -ENOTTY;
3074 goto end_function;
3075 }
3076
3077 switch (cmd) {
3078 case SEP_IOCSENDSEPCOMMAND:
3079 dev_dbg(&sep->pdev->dev,
3080 "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3081 current->pid);
3082 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3083 &call_status->status)) {
3084 dev_warn(&sep->pdev->dev,
3085 "[PID%d] send msg already done\n",
3086 current->pid);
3087 error = -EPROTO;
3088 goto end_function;
3089 }
3090 /* Send command to SEP */
3091 error = sep_send_command_handler(sep);
3092 if (!error)
3093 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3094 &call_status->status);
3095 dev_dbg(&sep->pdev->dev,
3096 "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3097 current->pid);
3098 break;
3099 case SEP_IOCENDTRANSACTION:
3100 dev_dbg(&sep->pdev->dev,
3101 "[PID%d] SEP_IOCENDTRANSACTION start\n",
3102 current->pid);
3103 error = sep_end_transaction_handler(sep, dma_ctx, call_status,
3104 my_queue_elem);
3105 dev_dbg(&sep->pdev->dev,
3106 "[PID%d] SEP_IOCENDTRANSACTION end\n",
3107 current->pid);
3108 break;
3109 case SEP_IOCPREPAREDCB:
3110 dev_dbg(&sep->pdev->dev,
3111 "[PID%d] SEP_IOCPREPAREDCB start\n",
3112 current->pid);
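	/* fall through */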
3113 case SEP_IOCPREPAREDCB_SECURE_DMA:
3114 dev_dbg(&sep->pdev->dev,
3115 "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3116 current->pid);
3117 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3118 &call_status->status)) {
3119 dev_dbg(&sep->pdev->dev,
3120 "[PID%d] dcb prep needed before send msg\n",
3121 current->pid);
3122 error = -EPROTO;
3123 goto end_function;
3124 }
3125
3126 if (!arg) {
3127 dev_dbg(&sep->pdev->dev,
3128 "[PID%d] dcb null arg\n", current->pid);
3129 error = -EINVAL;
3130 goto end_function;
3131 }
3132
3133 if (cmd == SEP_IOCPREPAREDCB) {
3134 /* No secure dma */
3135 dev_dbg(&sep->pdev->dev,
3136 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3137 current->pid);
3138
3139 error = sep_prepare_dcb_handler(sep, arg, false,
3140 dma_ctx);
3141 } else {
3142 /* Secure dma */
3143 dev_dbg(&sep->pdev->dev,
3144 "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA (with secure_dma)\n",
3145 current->pid);
3146
3147 error = sep_prepare_dcb_handler(sep, arg, true,
3148 dma_ctx);
3149 }
3150 dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
3151 current->pid);
3152 break;
3153 case SEP_IOCFREEDCB:
3154 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
3155 current->pid);
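	/* fall through */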
3156 case SEP_IOCFREEDCB_SECURE_DMA:
3157 dev_dbg(&sep->pdev->dev,
3158 "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3159 current->pid);
3160 error = sep_free_dcb_handler(sep, dma_ctx);
3161 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
3162 current->pid);
3163 break;
3164 default:
3165 error = -ENOTTY;
3166 dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
3167 current->pid);
3168 break;
3169 }
3170
3171end_function:
3172 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
3173
3174 return error;
3175}
3176
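/*
 * Hypothetical user-space call sequence implied by the ordering checks
 * above: mmap() must come first, DCB preparation must precede the send,
 * and the transaction must be ended explicitly. The device node name and
 * mapping size below are assumptions for illustration only.
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	void *shared = mmap(NULL, shared_size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, 0);
 *	ioctl(fd, SEP_IOCPREPAREDCB, &dcb_args);   before the send msg
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND);          once per transaction
 *	... wait for and read back the results ...
 *	ioctl(fd, SEP_IOCENDTRANSACTION);          wakes pending waiters
 */
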
3177/**
3178 * sep_inthandler - interrupt handler for sep device
3179 * @irq: interrupt
3180 * @dev_id: device id
3181 */
3182static irqreturn_t sep_inthandler(int irq, void *dev_id)
3183{
3184 unsigned long lock_irq_flag;
3185 u32 reg_val, reg_val2 = 0;
3186 struct sep_device *sep = dev_id;
3187 irqreturn_t int_error = IRQ_HANDLED;
3188
3189 /* Are we in power save? */
3190#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3191 if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
3192 dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
3193 return IRQ_NONE;
3194 }
3195#endif
3196
3197 if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
3198 dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
3199 return IRQ_NONE;
3200 }
3201
3202 /* Read the IRR register to check if this is SEP interrupt */
3203 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3204
3205 dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
3206
3207 if (reg_val & (0x1 << 13)) {
3208
3209 /* Lock and update the counter of reply messages */
3210 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
3211 sep->reply_ct++;
3212 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
3213
3214 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3215 sep->send_ct, sep->reply_ct);
3216
3217 /* Is this a kernel client request */
3218 if (sep->in_kernel) {
3219 tasklet_schedule(&sep->finish_tasklet);
3220 goto finished_interrupt;
3221 }
3222
3223 /* Is this printf or daemon request? */
3224 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3225 dev_dbg(&sep->pdev->dev,
3226 "SEP Interrupt - GPR2 is %08x\n", reg_val2);
3227
3228 clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
3229
3230 if ((reg_val2 >> 30) & 0x1) {
3231 dev_dbg(&sep->pdev->dev, "int: printf request\n");
3232 } else if (reg_val2 >> 31) {
3233 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3234 } else {
3235 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3236 wake_up(&sep->event_interrupt);
3237 }
3238 } else {
3239 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3240 int_error = IRQ_NONE;
3241 }
3242
3243finished_interrupt:
3244
3245 if (int_error == IRQ_HANDLED)
3246 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
3247
3248 return int_error;
3249}
3250
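/*
 * Sketch of the GPR2 decoding performed above, in the same order as the
 * handler checks it: bit 30 marks a printf request, otherwise bit 31
 * marks a daemon request, and anything else is a SEP reply that wakes
 * the waiting transaction. Hypothetical helper for clarity only.
 */
static inline const char *sep_gpr2_kind(u32 reg_val2)
{
	if ((reg_val2 >> 30) & 0x1)
		return "printf request";
	if (reg_val2 >> 31)
		return "daemon request";
	return "SEP reply";
}
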
3251/**
3252 * sep_reconfig_shared_area - reconfigure shared area
3253 * @sep: pointer to struct sep_device
3254 *
3255 * Reconfig the shared area between HOST and SEP - needed in case
3256 * the DX_CC_Init function was called before OS loading.
3257 */
3258static int sep_reconfig_shared_area(struct sep_device *sep)
3259{
3260 int ret_val;
3261
3262 /* use to limit waiting for SEP */
3263 unsigned long end_time;
3264
3265 /* Send the new SHARED MESSAGE AREA to the SEP */
3266 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3267 (unsigned long long)sep->shared_bus);
3268
3269 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3270
3271 /* Poll for SEP response */
3272 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3273
3274 end_time = jiffies + (WAIT_TIME * HZ);
3275
3276 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3277 (ret_val != sep->shared_bus))
3278 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3279
3280 /* Check the return value (register) */
3281 if (ret_val != sep->shared_bus) {
3282 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3283 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3284 ret_val = -ENOMEM;
3285 } else {
3286 ret_val = 0;
3287 }
3287
3288 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3289
3290 return ret_val;
3291}
3292
3293/**
3294 * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
3295 * contexts into use
3296 * @sep: SEP device
3297 * @dcb_region: DCB region copy
3298 * @dmatables_region: MLLI/DMA tables copy
3299 * @dma_ctx: DMA context for current transaction
3300 */
3301ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3302 struct sep_dcblock **dcb_region,
3303 void **dmatables_region,
3304 struct sep_dma_context *dma_ctx)
3305{
3306 void *dmaregion_free_start = NULL;
3307 void *dmaregion_free_end = NULL;
3308 void *dcbregion_free_start = NULL;
3309 void *dcbregion_free_end = NULL;
3310 ssize_t error = 0;
3311
3312 dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3313 current->pid);
3314
3315 if (1 > dma_ctx->nr_dcb_creat) {
3316 dev_warn(&sep->pdev->dev,
3317 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3318 current->pid, dma_ctx->nr_dcb_creat);
3319 error = -EINVAL;
3320 goto end_function;
3321 }
3322
3323 dmaregion_free_start = sep->shared_addr
3324 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3325 dmaregion_free_end = dmaregion_free_start
3326 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3327
3328 if (dmaregion_free_start
3329 + dma_ctx->dmatables_len > dmaregion_free_end) {
3330 error = -ENOMEM;
3331 goto end_function;
3332 }
3333 memcpy(dmaregion_free_start,
3334 *dmatables_region,
3335 dma_ctx->dmatables_len);
3336 /* Free MLLI table copy */
3337 kfree(*dmatables_region);
3338 *dmatables_region = NULL;
3339
3340 /* Copy thread's DCB table copy to DCB table region */
3341 dcbregion_free_start = sep->shared_addr +
3342 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3343 dcbregion_free_end = dcbregion_free_start +
3344 (SEP_MAX_NUM_SYNC_DMA_OPS *
3345 sizeof(struct sep_dcblock)) - 1;
3346
3347 if (dcbregion_free_start
3348 + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3349 > dcbregion_free_end) {
3350 error = -ENOMEM;
3351 goto end_function;
3352 }
3353
3354 memcpy(dcbregion_free_start,
3355 *dcb_region,
3356 dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3357
3358 /* Print the tables */
3359 dev_dbg(&sep->pdev->dev, "activate: input table\n");
3360 sep_debug_print_lli_tables(sep,
3361 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3362 (*dcb_region)->input_mlli_address),
3363 (*dcb_region)->input_mlli_num_entries,
3364 (*dcb_region)->input_mlli_data_size);
3365
3366 dev_dbg(&sep->pdev->dev, "activate: output table\n");
3367 sep_debug_print_lli_tables(sep,
3368 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3369 (*dcb_region)->output_mlli_address),
3370 (*dcb_region)->output_mlli_num_entries,
3371 (*dcb_region)->output_mlli_data_size);
3372
3373 dev_dbg(&sep->pdev->dev,
3374 "[PID%d] printing activated tables\n", current->pid);
3375
3376end_function:
3377 kfree(*dmatables_region);
3378 *dmatables_region = NULL;
3379
3380 kfree(*dcb_region);
3381 *dcb_region = NULL;
3382
3383 return error;
3384}
3385
3386/**
3387 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3388 * @sep: SEP device
3389 * @dcb_region: DCB region buf to create for current transaction
3390 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3391 * @dma_ctx: DMA context buf to create for current transaction
3392 * @user_dcb_args: User arguments for DCB/MLLI creation
3393 * @num_dcbs: Number of DCBs to create
3394 * @secure_dma: Indicate use of IMR restricted memory secure dma
3395 */
3396static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3397 struct sep_dcblock **dcb_region,
3398 void **dmatables_region,
3399 struct sep_dma_context **dma_ctx,
3400 const struct build_dcb_struct __user *user_dcb_args,
3401 const u32 num_dcbs, bool secure_dma)
3402{
3403 int error = 0;
3404 int i = 0;
3405 struct build_dcb_struct *dcb_args = NULL;
3406
3407 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3408 current->pid);
3409
3410 if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3411 error = -EINVAL;
3412 goto end_function;
3413 }
3414
3415 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3416 dev_warn(&sep->pdev->dev,
3417 "[PID%d] invalid number of dcbs 0x%08X\n",
3418 current->pid, num_dcbs);
3419 error = -EINVAL;
3420 goto end_function;
3421 }
3422
3423 dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct),
3424 GFP_KERNEL);
3425 if (!dcb_args) {
3426 dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n",
3427 current->pid);
3428 error = -ENOMEM;
3429 goto end_function;
3430 }
3431
3432 if (copy_from_user(dcb_args,
3433 user_dcb_args,
3434 num_dcbs * sizeof(struct build_dcb_struct))) {
3435 error = -EINVAL;
3436 goto end_function;
3437 }
3438
3439 /* Allocate thread-specific memory for DCB */
3440 *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3441 GFP_KERNEL);
3442 if (!(*dcb_region)) {
3443 error = -ENOMEM;
3444 goto end_function;
3445 }
3446
3447 /* Prepare DCB and MLLI table into the allocated regions */
3448 for (i = 0; i < num_dcbs; i++) {
3449 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3450 (unsigned long)dcb_args[i].app_in_address,
3451 (unsigned long)dcb_args[i].app_out_address,
3452 dcb_args[i].data_in_size,
3453 dcb_args[i].block_size,
3454 dcb_args[i].tail_block_size,
3455 dcb_args[i].is_applet,
3456 false, secure_dma,
3457 *dcb_region, dmatables_region,
3458 dma_ctx,
3459 NULL,
3460 NULL);
3461 if (error) {
3462 dev_warn(&sep->pdev->dev,
3463 "[PID%d] dma table creation failed\n",
3464 current->pid);
3465 goto end_function;
3466 }
3467
3468 if (dcb_args[i].app_in_address != 0)
3469 (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3470 }
3471
3472end_function:
3473 kfree(dcb_args);
3474 return error;
3475
3476}
3477
3478/**
3479 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3480 * for kernel crypto
3481 * @sep: SEP device
3482 * @dcb_region: DCB region buf to create for current transaction
3483 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3484 * @dma_ctx: DMA context buf to create for current transaction
3485 * @dcb_data: Kernel-space DCB parameters for DCB/MLLI creation
3486 * @num_dcbs: Number of DCBs to create
3487 * This does the same thing as sep_create_dcb_dmatables_context
3488 * except that it is used only for the kernel crypto operation. It is
3489 * separate because there is no user data involved; the dcb data structure
3490 * is specific for kernel crypto (build_dcb_struct_kernel)
3491 */
3492int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3493 struct sep_dcblock **dcb_region,
3494 void **dmatables_region,
3495 struct sep_dma_context **dma_ctx,
3496 const struct build_dcb_struct_kernel *dcb_data,
3497 const u32 num_dcbs)
3498{
3499 int error = 0;
3500 int i = 0;
3501
3502 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3503 current->pid);
3504
3505 if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3506 error = -EINVAL;
3507 goto end_function;
3508 }
3509
3510 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3511 dev_warn(&sep->pdev->dev,
3512 "[PID%d] invalid number of dcbs 0x%08X\n",
3513 current->pid, num_dcbs);
3514 error = -EINVAL;
3515 goto end_function;
3516 }
3517
3518 dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3519 current->pid, num_dcbs);
3520
3521 /* Allocate thread-specific memory for DCB */
3522 *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3523 GFP_KERNEL);
3524 if (!(*dcb_region)) {
3525 error = -ENOMEM;
3526 goto end_function;
3527 }
3528
3529 /* Prepare DCB and MLLI table into the allocated regions */
3530 for (i = 0; i < num_dcbs; i++) {
3531 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3532 (unsigned long)dcb_data->app_in_address,
3533 (unsigned long)dcb_data->app_out_address,
3534 dcb_data->data_in_size,
3535 dcb_data->block_size,
3536 dcb_data->tail_block_size,
3537 dcb_data->is_applet,
3538 true,
3539 false,
3540 *dcb_region, dmatables_region,
3541 dma_ctx,
3542 dcb_data->src_sg,
3543 dcb_data->dst_sg);
3544 if (error) {
3545 dev_warn(&sep->pdev->dev,
3546 "[PID%d] dma table creation failed\n",
3547 current->pid);
3548 goto end_function;
3549 }
3550 }
3551
3552end_function:
3553 return error;
3554
3555}
3556
3557/**
3558 * sep_activate_msgarea_context - Takes the message area context into use
3559 * @sep: SEP device
3560 * @msg_region: Message area context buf
3561 * @msg_len: Message area context buffer size
3562 */
3563static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3564 void **msg_region,
3565 const size_t msg_len)
3566{
3567 dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3568 current->pid);
3569
3570 if (!msg_region || !(*msg_region) ||
3571 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3572 dev_warn(&sep->pdev->dev,
3573 "[PID%d] invalid act msgarea len 0x%08zX\n",
3574 current->pid, msg_len);
3575 return -EINVAL;
3576 }
3577
3578 memcpy(sep->shared_addr, *msg_region, msg_len);
3579
3580 return 0;
3581}
3582
3583/**
3584 * sep_create_msgarea_context - Creates message area context
3585 * @sep: SEP device
3586 * @msg_region: Msg area region buf to create for current transaction
3587 * @msg_user: Content for msg area region from user
3588 * @msg_len: Message area size
3589 */
3590static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3591 void **msg_region,
3592 const void __user *msg_user,
3593 const size_t msg_len)
3594{
3595 int error = 0;
3596
3597 dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3598 current->pid);
3599
3600 if (!msg_region ||
3601 !msg_user ||
3602 SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3603 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3604 dev_warn(&sep->pdev->dev,
3605 "[PID%d] invalid creat msgarea len 0x%08zX\n",
3606 current->pid, msg_len);
3607 error = -EINVAL;
3608 goto end_function;
3609 }
3610
3611 /* Allocate thread-specific memory for message buffer */
3612 *msg_region = kzalloc(msg_len, GFP_KERNEL);
3613 if (!(*msg_region)) {
3614 dev_warn(&sep->pdev->dev,
3615 "[PID%d] no mem for msgarea context\n",
3616 current->pid);
3617 error = -ENOMEM;
3618 goto end_function;
3619 }
3620
3621 /* Copy input data to write() to allocated message buffer */
3622 if (copy_from_user(*msg_region, msg_user, msg_len)) {
3623 error = -EINVAL;
3624 goto end_function;
3625 }
3626
3627end_function:
3628 if (error && msg_region) {
3629 kfree(*msg_region);
3630 *msg_region = NULL;
3631 }
3632
3633 return error;
3634}
3635
3636
3637/**
3638 * sep_read - Returns results of an operation for fastcall interface
3639 * @filp: File pointer
3640 * @buf_user: User buffer for storing results
3641 * @count_user: User buffer size
3642 * @offset: File offset, not supported
3643 *
3644 * The implementation does not support reading in chunks, all data must be
3645 * consumed during a single read system call.
3646 */
3647static ssize_t sep_read(struct file *filp,
3648 char __user *buf_user, size_t count_user,
3649 loff_t *offset)
3650{
3651 struct sep_private_data * const private_data = filp->private_data;
3652 struct sep_call_status *call_status = &private_data->call_status;
3653 struct sep_device *sep = private_data->device;
3654 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3655 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3656 ssize_t error = 0, error_tmp = 0;
3657
3658 /* Am I the process that owns the transaction? */
3659 error = sep_check_transaction_owner(sep);
3660 if (error) {
3661 dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
3662 current->pid);
3663 goto end_function;
3664 }
3665
3666 /* Check that the user has called the necessary APIs */
3667 if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
3668 &call_status->status)) {
3669 dev_warn(&sep->pdev->dev,
3670 "[PID%d] fastcall write not called\n",
3671 current->pid);
3672 error = -EPROTO;
3673 goto end_function_error;
3674 }
3675
3676 if (!buf_user) {
3677 dev_warn(&sep->pdev->dev,
3678 "[PID%d] null user buffer\n",
3679 current->pid);
3680 error = -EINVAL;
3681 goto end_function_error;
3682 }
3683
3684
3685 /* Wait for SEP to finish */
3686 wait_event(sep->event_interrupt,
3687 test_bit(SEP_WORKING_LOCK_BIT,
3688 &sep->in_use_flags) == 0);
3689
3690 sep_dump_message(sep);
3691
3692 dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
3693 current->pid, count_user);
3694
3695 /* In case user has allocated bigger buffer */
3696 if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
3697 count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
3698
3699 if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
3700 error = -EFAULT;
3701 goto end_function_error;
3702 }
3703
3704 dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
3705 error = count_user;
3706
3707end_function_error:
3708 /* Copy possible tail data to user and free DCB and MLLIs */
3709 error_tmp = sep_free_dcb_handler(sep, dma_ctx);
3710 if (error_tmp)
3711 dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
3712 current->pid);
3713
3714 /* End the transaction, wakeup pending ones */
3715 error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
3716 my_queue_elem);
3717 if (error_tmp)
3718 dev_warn(&sep->pdev->dev,
3719 "[PID%d] ending transaction failed\n",
3720 current->pid);
3721
3722end_function:
3723 return error;
3724}
3725
3726/**
3727 * sep_fastcall_args_get - Gets fastcall params from user
3728 * @sep: SEP device
3729 * @args: Parameters buffer
3730 * @buf_user: User buffer for operation parameters
3731 * @count_user: User buffer size
3732 */
3733static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3734 struct sep_fastcall_hdr *args,
3735 const char __user *buf_user,
3736 const size_t count_user)
3737{
3738 ssize_t error = 0;
3739 size_t actual_count = 0;
3740
3741 if (!buf_user) {
3742 dev_warn(&sep->pdev->dev,
3743 "[PID%d] null user buffer\n",
3744 current->pid);
3745 error = -EINVAL;
3746 goto end_function;
3747 }
3748
3749 if (count_user < sizeof(struct sep_fastcall_hdr)) {
3750 dev_warn(&sep->pdev->dev,
3751 "[PID%d] too small message size 0x%08zX\n",
3752 current->pid, count_user);
3753 error = -EINVAL;
3754 goto end_function;
3755 }
3756
3757
3758 if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3759 error = -EFAULT;
3760 goto end_function;
3761 }
3762
3763 if (SEP_FC_MAGIC != args->magic) {
3764 dev_warn(&sep->pdev->dev,
3765 "[PID%d] invalid fastcall magic 0x%08X\n",
3766 current->pid, args->magic);
3767 error = -EINVAL;
3768 goto end_function;
3769 }
3770
3771 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3772 current->pid, args->num_dcbs);
3773 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3774 current->pid, args->msg_len);
3775
3776 if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3777 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3778 dev_warn(&sep->pdev->dev,
3779 "[PID%d] invalid message length\n",
3780 current->pid);
3781 error = -EINVAL;
3782 goto end_function;
3783 }
3784
3785 actual_count = sizeof(struct sep_fastcall_hdr)
3786 + args->msg_len
3787 + (args->num_dcbs * sizeof(struct build_dcb_struct));
3788
3789 if (actual_count != count_user) {
3790 dev_warn(&sep->pdev->dev,
3791 "[PID%d] inconsistent message "
3792 "sizes 0x%08zX vs 0x%08zX\n",
3793 current->pid, actual_count, count_user);
3794 error = -EMSGSIZE;
3795 goto end_function;
3796 }
3797
3798end_function:
3799 return error;
3800}
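/*
 * The size check above fixes the layout of a fastcall write() buffer:
 * the header, then num_dcbs build_dcb_struct entries, then the message
 * area contents (the order in which sep_write() advances buf_user).
 * A user-space sketch, assuming hdr, dcbs and msg are buffers the
 * caller has already prepared:
 *
 *	size_t dcbs_len = hdr.num_dcbs * sizeof(struct build_dcb_struct);
 *	size_t total = sizeof(hdr) + dcbs_len + hdr.msg_len;
 *	memcpy(buf, &hdr, sizeof(hdr));
 *	memcpy(buf + sizeof(hdr), dcbs, dcbs_len);
 *	memcpy(buf + sizeof(hdr) + dcbs_len, msg, hdr.msg_len);
 *	if (write(fd, buf, total) != (ssize_t)total)
 *		return -1;
 */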
3801
3802/**
3803 * sep_write - Starts an operation for fastcall interface
3804 * @filp: File pointer
3805 * @buf_user: User buffer for operation parameters
3806 * @count_user: User buffer size
3807 * @offset: File offset, not supported
3808 *
3809 * The implementation does not support writing in chunks,
3810 * all data must be given during a single write system call.
3811 */
3812static ssize_t sep_write(struct file *filp,
3813 const char __user *buf_user, size_t count_user,
3814 loff_t *offset)
3815{
3816 struct sep_private_data * const private_data = filp->private_data;
3817 struct sep_call_status *call_status = &private_data->call_status;
3818 struct sep_device *sep = private_data->device;
3819 struct sep_dma_context *dma_ctx = NULL;
3820 struct sep_fastcall_hdr call_hdr = {0};
3821 void *msg_region = NULL;
3822 void *dmatables_region = NULL;
3823 struct sep_dcblock *dcb_region = NULL;
3824 ssize_t error = 0;
3825 struct sep_queue_info *my_queue_elem = NULL;
3826 bool my_secure_dma; /* are we using secure_dma (IMR)? */
3827
3828 dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3829 current->pid, sep);
3830 dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3831 current->pid, private_data);
3832
3833 error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3834 if (error)
3835 goto end_function;
3836
3837 buf_user += sizeof(struct sep_fastcall_hdr);
3838
3839 if (call_hdr.secure_dma == 0)
3840 my_secure_dma = false;
3841 else
3842 my_secure_dma = true;
3843
3844 /*
3845 * Controlling driver memory usage by limiting amount of
3846 * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
3847 * of threads can progress further at a time
3848 */
3849 dev_dbg(&sep->pdev->dev,
3850 "[PID%d] waiting for double buffering region access\n",
3851 current->pid);
3852 error = down_interruptible(&sep->sep_doublebuf);
3853 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3854 current->pid);
3855 if (error) {
3856 /* Signal received */
3857 goto end_function_error;
3858 }
3859
3860
3861 /*
3862 * Prepare contents of the shared area regions for
3863 * the operation into temporary buffers
3864 */
3865 if (0 < call_hdr.num_dcbs) {
3866 error = sep_create_dcb_dmatables_context(sep,
3867 &dcb_region,
3868 &dmatables_region,
3869 &dma_ctx,
3870 (const struct build_dcb_struct __user *)
3871 buf_user,
3872 call_hdr.num_dcbs, my_secure_dma);
3873 if (error)
3874 goto end_function_error_doublebuf;
3875
3876 buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3877 }
3878
3879 error = sep_create_msgarea_context(sep,
3880 &msg_region,
3881 buf_user,
3882 call_hdr.msg_len);
3883 if (error)
3884 goto end_function_error_doublebuf;
3885
3886 dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3887 current->pid);
3888 my_queue_elem = sep_queue_status_add(sep,
3889 ((struct sep_msgarea_hdr *)msg_region)->opcode,
3890 (dma_ctx) ? dma_ctx->input_data_len : 0,
3891 current->pid,
3892 current->comm, sizeof(current->comm));
3893
3894 if (!my_queue_elem) {
3895 dev_dbg(&sep->pdev->dev,
3896 "[PID%d] updating queue status error\n", current->pid);
3897 error = -ENOMEM;
3898 goto end_function_error_doublebuf;
3899 }
3900
3901 /* Wait until current process gets the transaction */
3902 error = sep_wait_transaction(sep);
3903
3904 if (error) {
3905 /* Interrupted by signal, don't clear transaction */
3906 dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3907 current->pid);
3908 sep_queue_status_remove(sep, &my_queue_elem);
3909 goto end_function_error_doublebuf;
3910 }
3911
3912 dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3913 current->pid);
3914 private_data->my_queue_elem = my_queue_elem;
3915
3916 /* Activate shared area regions for the transaction */
3917 error = sep_activate_msgarea_context(sep, &msg_region,
3918 call_hdr.msg_len);
3919 if (error)
3920 goto end_function_error_clear_transact;
3921
3922 sep_dump_message(sep);
3923
3924 if (0 < call_hdr.num_dcbs) {
3925 error = sep_activate_dcb_dmatables_context(sep,
3926 &dcb_region,
3927 &dmatables_region,
3928 dma_ctx);
3929 if (error)
3930 goto end_function_error_clear_transact;
3931 }
3932
3933 /* Send command to SEP */
3934 error = sep_send_command_handler(sep);
3935 if (error)
3936 goto end_function_error_clear_transact;
3937
3938 /* Store DMA context for the transaction */
3939 private_data->dma_ctx = dma_ctx;
3940 /* Update call status */
3941 set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3942 error = count_user;
3943
3944 up(&sep->sep_doublebuf);
3945 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3946 current->pid);
3947
3948 goto end_function;
3949
3950end_function_error_clear_transact:
3951 sep_end_transaction_handler(sep, &dma_ctx, call_status,
3952 &private_data->my_queue_elem);
3953
3954end_function_error_doublebuf:
3955 up(&sep->sep_doublebuf);
3956 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3957 current->pid);
3958
3959end_function_error:
3960 if (dma_ctx)
3961 sep_free_dma_table_data_handler(sep, &dma_ctx);
3962
3963end_function:
3964 kfree(dcb_region);
3965 kfree(dmatables_region);
3966 kfree(msg_region);
3967
3968 return error;
3969}
3970/**
3971 * sep_seek - Handler for seek system call
3972 * @filp: File pointer
3973 * @offset: File offset
3974 * @origin: Options for offset
3975 *
3976 * Fastcall interface does not support seeking, all reads
3977 * and writes are from/to offset zero
3978 */
3979static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3980{
3981 return -ENOSYS;
3982}
3983
3984
3985
3986/**
3987 * sep_file_operations - file operation on sep device
3988 * @sep_ioctl: ioctl handler from user space call
3989 * @sep_poll: poll handler
3990 * @sep_open: handles sep device open request
3991 * @sep_release: handles sep device release request
3992 * @sep_mmap: handles memory mapping requests
3993 * @sep_read: handles read request on sep device
3994 * @sep_write: handles write request on sep device
3995 * @sep_seek: handles seek request on sep device
3996 */
3997static const struct file_operations sep_file_operations = {
3998 .owner = THIS_MODULE,
3999 .unlocked_ioctl = sep_ioctl,
4000 .poll = sep_poll,
4001 .open = sep_open,
4002 .release = sep_release,
4003 .mmap = sep_mmap,
4004 .read = sep_read,
4005 .write = sep_write,
4006 .llseek = sep_seek,
4007};
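/*
 * Together these handlers form the fastcall protocol as seen from user
 * space: open the misc device node (registered under SEP_DEV_NAME; the
 * exact /dev path below is an assumption), write() one
 * header+DCBs+message blob to start a transaction, then read() once to
 * collect the results. A rough sketch:
 *
 *	int fd = open("/dev/sep", O_RDWR);	(node name assumed)
 *	write(fd, buf, total);			(starts the operation)
 *	read(fd, reply, sizeof(reply));		(waits for SEP to finish)
 *	close(fd);
 */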
4008
4009/**
4010 * sep_sysfs_read - read sysfs entry per given arguments
4011 * @filp: file pointer
4012 * @kobj: kobject pointer
4013 * @attr: binary file attributes
4014 * @buf: read to this buffer
4015 * @pos: offset to read
4016 * @count: amount of data to read
4017 *
4018 * This function reads sysfs entries for the sep driver per the given arguments.
4019 */
4020static ssize_t
4021sep_sysfs_read(struct file *filp, struct kobject *kobj,
4022 struct bin_attribute *attr,
4023 char *buf, loff_t pos, size_t count)
4024{
4025 unsigned long lck_flags;
4026 size_t nleft = count;
4027 struct sep_device *sep = sep_dev;
4028 struct sep_queue_info *queue_elem = NULL;
4029 u32 queue_num = 0;
4030 u32 i = 1;
4031
4032 spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4033
4034 queue_num = sep->sep_queue_num;
4035 if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4036 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4037
4038
4039 if (count < sizeof(queue_num)
4040 + (queue_num * sizeof(struct sep_queue_data))) {
4041 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4042 return -EINVAL;
4043 }
4044
4045 memcpy(buf, &queue_num, sizeof(queue_num));
4046 buf += sizeof(queue_num);
4047 nleft -= sizeof(queue_num);
4048
4049 list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4050 if (i++ > queue_num)
4051 break;
4052
4053 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4054 nleft -= sizeof(queue_elem->data);
4055 buf += sizeof(queue_elem->data);
4056 }
4057 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4058
4059 return count - nleft;
4060}
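/*
 * The blob assembled above is a u32 element count followed by that many
 * struct sep_queue_data records. A sketch of a user-space consumer of
 * the "queue_status" attribute (record fields left opaque since their
 * layout lives in the driver headers):
 *
 *	u32 n;
 *	char buf[4096];
 *	ssize_t r = read(fd, buf, sizeof(buf));
 *	if (r >= (ssize_t)sizeof(n)) {
 *		memcpy(&n, buf, sizeof(n));
 *		struct sep_queue_data *q = (void *)(buf + sizeof(n));
 *		(q[0] .. q[n-1] are the queued transactions)
 *	}
 */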
4061
4062/**
4063 * queue_status - defines binary attributes for the queue_status sysfs file
4064 * @attr: attributes (name & permissions)
4065 * @read: function pointer to read this file
4066 * @size: maximum size of binary attribute
4067 */
4068static const struct bin_attribute queue_status = {
4069 .attr = {.name = "queue_status", .mode = 0444},
4070 .read = sep_sysfs_read,
4071 .size = sizeof(u32)
4072 + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4073};
4074
4075/**
4076 * sep_register_driver_with_fs - register misc devices
4077 * @sep: pointer to struct sep_device
4078 *
4079 * This function registers the driver with the file system
4080 */
4081static int sep_register_driver_with_fs(struct sep_device *sep)
4082{
4083 int ret_val;
4084
4085 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4086 sep->miscdev_sep.name = SEP_DEV_NAME;
4087 sep->miscdev_sep.fops = &sep_file_operations;
4088
4089 ret_val = misc_register(&sep->miscdev_sep);
4090 if (ret_val) {
4091 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4092 ret_val);
4093 return ret_val;
4094 }
4095
4096 ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4097 &queue_status);
4098 if (ret_val) {
4099 dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
4100 ret_val);
4101 return ret_val;
4102 }
4103
4104 return ret_val;
4105}
4106
4107
4108/**
4109 * sep_probe - probe a matching PCI device
4110 * @pdev: pci_device
4111 * @ent: pci_device_id
4112 *
4113 * Attempt to set up and configure a SEP device that has been
4114 * discovered by the PCI layer. Allocates all required resources.
4115 */
4116static int __devinit sep_probe(struct pci_dev *pdev,
4117 const struct pci_device_id *ent)
4118{
4119 int error = 0;
4120 struct sep_device *sep = NULL;
4121
4122 if (sep_dev != NULL) {
4123 dev_dbg(&pdev->dev, "only one SEP supported.\n");
4124 return -EBUSY;
4125 }
4126
4127 /* Enable the device */
4128 error = pci_enable_device(pdev);
4129 if (error) {
4130 dev_warn(&pdev->dev, "error enabling pci device\n");
4131 goto end_function;
4132 }
4133
4134 /* Allocate the sep_device structure for this device */
4135 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
4136 if (sep_dev == NULL) {
4137 dev_warn(&pdev->dev,
4138 "can't kmalloc the sep_device structure\n");
4139 error = -ENOMEM;
4140 goto end_function_disable_device;
4141 }
4142
4143 /*
4144 * We're going to use another variable for actually
4145 * working with the device; this way, if we have
4146 * multiple devices in the future, it would be easier
4147 * to make appropriate changes
4148 */
4149 sep = sep_dev;
4150
4151 sep->pdev = pci_dev_get(pdev);
4152
4153 init_waitqueue_head(&sep->event_transactions);
4154 init_waitqueue_head(&sep->event_interrupt);
4155 spin_lock_init(&sep->snd_rply_lck);
4156 spin_lock_init(&sep->sep_queue_lock);
4157 sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
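	/*
	 * sep_write() brackets its buffer staging with down_interruptible()/
	 * up() on this semaphore, so at most SEP_DOUBLEBUF_USERS_LIMIT
	 * writers prepare shared-area regions concurrently.
	 */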
4158
4159 INIT_LIST_HEAD(&sep->sep_queue_status);
4160
4161 dev_dbg(&sep->pdev->dev,
4162 "sep probe: PCI obtained, device being prepared\n");
4163
4164 /* Set up our register area */
4165 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4166 if (!sep->reg_physical_addr) {
4167 dev_warn(&sep->pdev->dev, "Error getting register start\n");
4168 error = -ENODEV;
4169 goto end_function_free_sep_dev;
4170 }
4171
4172 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4173 if (!sep->reg_physical_end) {
4174 dev_warn(&sep->pdev->dev, "Error getting register end\n");
4175 error = -ENODEV;
4176 goto end_function_free_sep_dev;
4177 }
4178
4179 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4180 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4181 if (!sep->reg_addr) {
4182 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4183 error = -ENODEV;
4184 goto end_function_free_sep_dev;
4185 }
4186
4187 dev_dbg(&sep->pdev->dev,
4188 "Register area start %llx end %llx virtual %p\n",
4189 (unsigned long long)sep->reg_physical_addr,
4190 (unsigned long long)sep->reg_physical_end,
4191 sep->reg_addr);
4192
4193 /* Allocate the shared area */
4194 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4195 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4196 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4197 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4198 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4199
4200 if (sep_map_and_alloc_shared_area(sep)) {
4201 error = -ENOMEM;
4202 /* Allocation failed */
4203 goto end_function_error;
4204 }
4205
4206 /* Clear ICR register */
4207 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4208
4209 /* Set the IMR register - open only GPR 2 */
4210 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
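	/*
	 * (~(0x1 << 13)) leaves only bit 13 unmasked; per the comment above
	 * that bit is the GPR 2 interrupt source, all others stay masked.
	 */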
4211
4212 /* Read send/receive counters from SEP */
4213 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4214 sep->reply_ct &= 0x3FFFFFFF;
4215 sep->send_ct = sep->reply_ct;
4216
4217 /* Get the interrupt line */
4218 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4219 "sep_driver", sep);
4220
4221 if (error)
4222 goto end_function_deallocate_sep_shared_area;
4223
4224 /* The new chip requires a shared area reconfigure */
4225 error = sep_reconfig_shared_area(sep);
4226 if (error)
4227 goto end_function_free_irq;
4228
4229 sep->in_use = 1;
4230
4231 /* Finally magic up the device nodes */
4232 /* Register driver with the fs */
4233 error = sep_register_driver_with_fs(sep);
4234
4235 if (error) {
4236 dev_err(&sep->pdev->dev, "error registering dev file\n");
4237 goto end_function_free_irq;
4238 }
4239
4240 sep->in_use = 0; /* through touching the device */
4241#ifdef SEP_ENABLE_RUNTIME_PM
4242 pm_runtime_put_noidle(&sep->pdev->dev);
4243 pm_runtime_allow(&sep->pdev->dev);
4244 pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4245 SUSPEND_DELAY);
4246 pm_runtime_use_autosuspend(&sep->pdev->dev);
4247 pm_runtime_mark_last_busy(&sep->pdev->dev);
4248 sep->power_save_setup = 1;
4249#endif
4250 /* register kernel crypto driver */
4251#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4252 error = sep_crypto_setup();
4253 if (error) {
4254 dev_err(&sep->pdev->dev, "crypto setup failed\n");
4255 goto end_function_free_irq;
4256 }
4257#endif
4258 goto end_function;
4259
4260end_function_free_irq:
4261 free_irq(pdev->irq, sep);
4262
4263end_function_deallocate_sep_shared_area:
4264 /* De-allocate shared area */
4265 sep_unmap_and_free_shared_area(sep);
4266
4267end_function_error:
4268 iounmap(sep->reg_addr);
4269
4270end_function_free_sep_dev:
4271 pci_dev_put(sep_dev->pdev);
4272 kfree(sep_dev);
4273 sep_dev = NULL;
4274
4275end_function_disable_device:
4276 pci_disable_device(pdev);
4277
4278end_function:
4279 return error;
4280}
4281
4282/**
4283 * sep_remove - handles removing device from pci subsystem
4284 * @pdev: pointer to pci device
4285 *
4286 * This function will handle removing our sep device from pci subsystem on exit
4287 * or unloading this module. It should free up all used resources, and unmap if
4288 * any memory regions mapped.
4289 */
4290static void sep_remove(struct pci_dev *pdev)
4291{
4292 struct sep_device *sep = sep_dev;
4293
4294 /* Unregister from fs */
4295 misc_deregister(&sep->miscdev_sep);
4296
4297 /* Unregister from kernel crypto */
4298#if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4299 sep_crypto_takedown();
4300#endif
4301 /* Free the irq */
4302 free_irq(sep->pdev->irq, sep);
4303
4304 /* Free the shared area */
4305 sep_unmap_and_free_shared_area(sep_dev);
4306 iounmap(sep_dev->reg_addr);
4307
4308#ifdef SEP_ENABLE_RUNTIME_PM
4309 if (sep->in_use) {
4310 sep->in_use = 0;
4311 pm_runtime_forbid(&sep->pdev->dev);
4312 pm_runtime_get_noresume(&sep->pdev->dev);
4313 }
4314#endif
4315 pci_dev_put(sep_dev->pdev);
4316 kfree(sep_dev);
4317 sep_dev = NULL;
4318}
4319
4320/* Initialize struct pci_device_id for our driver */
4321static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
4322 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4323 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
4324 {0}
4325};
4326
4327/* Export our pci_device_id structure to user space */
4328MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4329
4330#ifdef SEP_ENABLE_RUNTIME_PM
4331
4332/**
4333 * sep_pci_resume - resume routine while waking up from S3 state
4334 * @dev: pointer to sep device
4335 *
4336 * This function wakes up the SEP driver when the system resumes from the S3
4337 * state, i.e. suspend to RAM. The RAM is intact.
4338 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4339 */
4340static int sep_pci_resume(struct device *dev)
4341{
4342 struct sep_device *sep = sep_dev;
4343
4344 dev_dbg(&sep->pdev->dev, "pci resume called\n");
4345
4346 if (sep->power_state == SEP_DRIVER_POWERON)
4347 return 0;
4348
4349 /* Clear ICR register */
4350 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4351
4352 /* Set the IMR register - open only GPR 2 */
4353 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4354
4355 /* Read send/receive counters from SEP */
4356 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4357 sep->reply_ct &= 0x3FFFFFFF;
4358 sep->send_ct = sep->reply_ct;
4359
4360 sep->power_state = SEP_DRIVER_POWERON;
4361
4362 return 0;
4363}
4364
4365/**
4366 * sep_pci_suspend - suspend routine while going to S3 state
4367 * @dev: pointer to sep device
4368 *
4369 * This function suspends the SEP driver while the system goes to the S3
4370 * state, i.e. suspend to RAM. The RAM is intact and powered on during suspend.
4371 * Notes - revisit with more understanding of pm, ICR/IMR
4372 */
4373static int sep_pci_suspend(struct device *dev)
4374{
4375 struct sep_device *sep = sep_dev;
4376
4377 dev_dbg(&sep->pdev->dev, "pci suspend called\n");
4378 if (sep->in_use == 1)
4379 return -EAGAIN;
4380
4381 sep->power_state = SEP_DRIVER_POWEROFF;
4382
4383 /* Clear ICR register */
4384 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4385
4386 /* Set the IMR to block all */
4387 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4388
4389 return 0;
4390}
4391
4392/**
4393 * sep_pm_runtime_resume - runtime resume routine
4394 * @dev: pointer to sep device
4395 *
4396 * Notes - revisit with more understanding of pm, ICR/IMR & counters
4397 */
4398static int sep_pm_runtime_resume(struct device *dev)
4399{
4400
4401 u32 retval2;
4402 u32 delay_count;
4403 struct sep_device *sep = sep_dev;
4404
4405 dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4406
4407 /**
4408 * Wait until the SCU boot is ready
4409 * This is done by iterating SCU_DELAY_ITERATION (10
4410 * microseconds each) up to SCU_DELAY_MAX (50) times.
4411 * This bit can be set in a random time that is less
4412 * than 500 microseconds after each power resume
4413 */
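	/*
	 * Worst case below: SCU_DELAY_MAX * SCU_DELAY_ITERATION
	 * = 50 * 10 us = 500 us of polling before giving up.
	 */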
4414 retval2 = 0;
4415 delay_count = 0;
4416 while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4417 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4418 retval2 &= 0x00000008;
4419 if (!retval2) {
4420 udelay(SCU_DELAY_ITERATION);
4421 delay_count += 1;
4422 }
4423 }
4424
4425 if (!retval2) {
4426 dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4427 return -EINVAL;
4428 }
4429
4430 /* Clear ICR register */
4431 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4432
4433 /* Set the IMR register - open only GPR 2 */
4434 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4435
4436 /* Read send/receive counters from SEP */
4437 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4438 sep->reply_ct &= 0x3FFFFFFF;
4439 sep->send_ct = sep->reply_ct;
4440
4441 return 0;
4442}
4443
4444/**
4445 * sep_pm_runtime_suspend - runtime suspend routine
4446 * @dev: pointer to sep device
4447 *
4448 * Notes - revisit with more understanding of pm
4449 */
4450static int sep_pm_runtime_suspend(struct device *dev)
4451{
4452 struct sep_device *sep = sep_dev;
4453
4454 dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4455
4456 /* Clear ICR register */
4457 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4458 return 0;
4459}
4460
4461/**
4462 * sep_pm - power management for sep driver
4463 * @sep_pm_runtime_resume: resume- no communication with cpu & main memory
4464 * @sep_pm_runtime_suspend: suspend- no communication with cpu & main memory
4465 * @sep_pci_suspend: suspend - main memory is still ON
4466 * @sep_pci_resume: resume - main memory is still ON
4467 */
4468static const struct dev_pm_ops sep_pm = {
4469 .runtime_resume = sep_pm_runtime_resume,
4470 .runtime_suspend = sep_pm_runtime_suspend,
4471 .resume = sep_pci_resume,
4472 .suspend = sep_pci_suspend,
4473};
4474#endif /* SEP_ENABLE_RUNTIME_PM */
4475
4476/**
4477 * sep_pci_driver - registers this device with pci subsystem
4478 * @name: name identifier for this driver
4479 * @sep_pci_id_tbl: pointer to struct pci_device_id table
4480 * @sep_probe: pointer to probe function in PCI driver
4481 * @sep_remove: pointer to remove function in PCI driver
4482 */
4483static struct pci_driver sep_pci_driver = {
4484#ifdef SEP_ENABLE_RUNTIME_PM
4485 .driver = {
4486 .pm = &sep_pm,
4487 },
4488#endif
4489 .name = "sep_sec_driver",
4490 .id_table = sep_pci_id_tbl,
4491 .probe = sep_probe,
4492 .remove = sep_remove
4493};
4494
4495/**
4496 * sep_init - init function
4497 *
4498 * Module load time. Register the PCI device driver.
4499 */
4500
4501static int __init sep_init(void)
4502{
4503 return pci_register_driver(&sep_pci_driver);
4504}
4505
4506
4507/**
4508 * sep_exit - called to unload driver
4509 *
4510 * Unregister the driver. The device will perform all the required cleanup.
4511 */
4512static void __exit sep_exit(void)
4513{
4514 pci_unregister_driver(&sep_pci_driver);
4515}
4516
4517
4518module_init(sep_init);
4519module_exit(sep_exit);
4520
4521MODULE_LICENSE("GPL");