3 * sep_main.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2011 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c
31 * 2011.02.22 Enable kernel crypto operation
33 * Please note that this driver is based on information in the Discretix
34 * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
35 * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
36 * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
37 * Overview and Integration Guide.
40 /* #define SEP_PERF_DEBUG */
42 #include <linux/init.h>
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/miscdevice.h>
47 #include <linux/cdev.h>
48 #include <linux/kdev_t.h>
49 #include <linux/mutex.h>
50 #include <linux/sched.h>
52 #include <linux/poll.h>
53 #include <linux/wait.h>
54 #include <linux/pci.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/slab.h>
57 #include <linux/ioctl.h>
58 #include <asm/current.h>
59 #include <linux/ioport.h>
61 #include <linux/interrupt.h>
62 #include <linux/pagemap.h>
63 #include <asm/cacheflush.h>
64 #include <linux/sched.h>
65 #include <linux/delay.h>
66 #include <linux/jiffies.h>
67 #include <linux/async.h>
68 #include <linux/crypto.h>
69 #include <crypto/internal/hash.h>
70 #include <crypto/scatterwalk.h>
71 #include <crypto/sha.h>
72 #include <crypto/md5.h>
73 #include <crypto/aes.h>
74 #include <crypto/des.h>
75 #include <crypto/hash.h>
77 #include "sep_driver_hw_defs.h"
78 #include "sep_driver_config.h"
79 #include "sep_driver_api.h"
81 #include "sep_crypto.h"
83 #define CREATE_TRACE_POINTS
84 #include "sep_trace_events.h"
/*
 * Let's not spend cycles iterating over message
 * area contents if debugging not enabled
 */
#ifdef DEBUG
#define sep_dump_message(sep) _sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif
/*
 * Currently, there is only one SEP device per platform;
 * In event platforms in the future have more than one SEP
 * device, this will be a linked list
 */
struct sep_device *sep_dev;
105 * sep_queue_status_remove - Removes transaction from status queue
107 * @sep_queue_info: pointer to status queue
109 * This function will remove information about transaction from the queue.
111 void sep_queue_status_remove(struct sep_device
*sep
,
112 struct sep_queue_info
**queue_elem
)
114 unsigned long lck_flags
;
116 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_queue_status_remove\n",
119 if (!queue_elem
|| !(*queue_elem
)) {
120 dev_dbg(&sep
->pdev
->dev
, "PID%d %s null\n",
121 current
->pid
, __func__
);
125 spin_lock_irqsave(&sep
->sep_queue_lock
, lck_flags
);
126 list_del(&(*queue_elem
)->list
);
127 sep
->sep_queue_num
--;
128 spin_unlock_irqrestore(&sep
->sep_queue_lock
, lck_flags
);
133 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_queue_status_remove return\n",
139 * sep_queue_status_add - Adds transaction to status queue
141 * @opcode: transaction opcode
142 * @size: input data size
143 * @pid: pid of current process
144 * @name: current process name
145 * @name_len: length of name (current process)
147 * This function adds information about about transaction started to the status
150 struct sep_queue_info
*sep_queue_status_add(
151 struct sep_device
*sep
,
155 u8
*name
, size_t name_len
)
157 unsigned long lck_flags
;
158 struct sep_queue_info
*my_elem
= NULL
;
160 my_elem
= kzalloc(sizeof(struct sep_queue_info
), GFP_KERNEL
);
165 dev_dbg(&sep
->pdev
->dev
, "[PID%d] kzalloc ok\n", current
->pid
);
167 my_elem
->data
.opcode
= opcode
;
168 my_elem
->data
.size
= size
;
169 my_elem
->data
.pid
= pid
;
171 if (name_len
> TASK_COMM_LEN
)
172 name_len
= TASK_COMM_LEN
;
174 memcpy(&my_elem
->data
.name
, name
, name_len
);
176 spin_lock_irqsave(&sep
->sep_queue_lock
, lck_flags
);
178 list_add_tail(&my_elem
->list
, &sep
->sep_queue_status
);
179 sep
->sep_queue_num
++;
181 spin_unlock_irqrestore(&sep
->sep_queue_lock
, lck_flags
);
187 * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
189 * @dmatables_region: Destination pointer for the buffer
190 * @dma_ctx: DMA context for the transaction
191 * @table_count: Number of MLLI/DMA tables to create
192 * The buffer created will not work as-is for DMA operations,
193 * it needs to be copied over to the appropriate place in the
196 static int sep_allocate_dmatables_region(struct sep_device
*sep
,
197 void **dmatables_region
,
198 struct sep_dma_context
*dma_ctx
,
199 const u32 table_count
)
201 const size_t new_len
=
202 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
- 1;
204 void *tmp_region
= NULL
;
206 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dma_ctx = 0x%p\n",
207 current
->pid
, dma_ctx
);
208 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dmatables_region = 0x%p\n",
209 current
->pid
, dmatables_region
);
211 if (!dma_ctx
|| !dmatables_region
) {
212 dev_warn(&sep
->pdev
->dev
,
213 "[PID%d] dma context/region uninitialized\n",
218 dev_dbg(&sep
->pdev
->dev
, "[PID%d] newlen = 0x%08zX\n",
219 current
->pid
, new_len
);
220 dev_dbg(&sep
->pdev
->dev
, "[PID%d] oldlen = 0x%08X\n", current
->pid
,
221 dma_ctx
->dmatables_len
);
222 tmp_region
= kzalloc(new_len
+ dma_ctx
->dmatables_len
, GFP_KERNEL
);
224 dev_warn(&sep
->pdev
->dev
,
225 "[PID%d] no mem for dma tables region\n",
230 /* Were there any previous tables that need to be preserved ? */
231 if (*dmatables_region
) {
232 memcpy(tmp_region
, *dmatables_region
, dma_ctx
->dmatables_len
);
233 kfree(*dmatables_region
);
234 *dmatables_region
= NULL
;
237 *dmatables_region
= tmp_region
;
239 dma_ctx
->dmatables_len
+= new_len
;
245 * sep_wait_transaction - Used for synchronizing transactions
248 int sep_wait_transaction(struct sep_device
*sep
)
253 if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT
,
254 &sep
->in_use_flags
)) {
255 dev_dbg(&sep
->pdev
->dev
,
256 "[PID%d] no transactions, returning\n",
258 goto end_function_setpid
;
262 * Looping needed even for exclusive waitq entries
263 * due to process wakeup latencies, previous process
264 * might have already created another transaction.
268 * Exclusive waitq entry, so that only one process is
269 * woken up from the queue at a time.
271 prepare_to_wait_exclusive(&sep
->event_transactions
,
274 if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT
,
275 &sep
->in_use_flags
)) {
276 dev_dbg(&sep
->pdev
->dev
,
277 "[PID%d] no transactions, breaking\n",
281 dev_dbg(&sep
->pdev
->dev
,
282 "[PID%d] transactions ongoing, sleeping\n",
285 dev_dbg(&sep
->pdev
->dev
, "[PID%d] woken up\n", current
->pid
);
287 if (signal_pending(current
)) {
288 dev_dbg(&sep
->pdev
->dev
, "[PID%d] received signal\n",
296 * The pid_doing_transaction indicates that this process
297 * now owns the facilities to perform a transaction with
298 * the SEP. While this process is performing a transaction,
299 * no other process who has the SEP device open can perform
300 * any transactions. This method allows more than one process
301 * to have the device open at any given time, which provides
302 * finer granularity for device utilization by multiple
305 /* Only one process is able to progress here at a time */
306 sep
->pid_doing_transaction
= current
->pid
;
309 finish_wait(&sep
->event_transactions
, &wait
);
315 * sep_check_transaction_owner - Checks if current process owns transaction
318 static inline int sep_check_transaction_owner(struct sep_device
*sep
)
320 dev_dbg(&sep
->pdev
->dev
, "[PID%d] transaction pid = %d\n",
322 sep
->pid_doing_transaction
);
324 if ((sep
->pid_doing_transaction
== 0) ||
325 (current
->pid
!= sep
->pid_doing_transaction
)) {
329 /* We own the transaction */
336 * sep_dump_message - dump the message that is pending
338 * This will only print dump if DEBUG is set; it does
339 * follow kernel debug print enabling
341 static void _sep_dump_message(struct sep_device
*sep
)
345 u32
*p
= sep
->shared_addr
;
347 for (count
= 0; count
< 10 * 4; count
+= 4)
348 dev_dbg(&sep
->pdev
->dev
,
349 "[PID%d] Word %d of the message is %x\n",
350 current
->pid
, count
/4, *p
++);
356 * sep_map_and_alloc_shared_area -allocate shared block
357 * @sep: security processor
358 * @size: size of shared area
360 static int sep_map_and_alloc_shared_area(struct sep_device
*sep
)
362 sep
->shared_addr
= dma_alloc_coherent(&sep
->pdev
->dev
,
364 &sep
->shared_bus
, GFP_KERNEL
);
366 if (!sep
->shared_addr
) {
367 dev_dbg(&sep
->pdev
->dev
,
368 "[PID%d] shared memory dma_alloc_coherent failed\n",
372 dev_dbg(&sep
->pdev
->dev
,
373 "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
375 sep
->shared_size
, sep
->shared_addr
,
376 (unsigned long long)sep
->shared_bus
);
381 * sep_unmap_and_free_shared_area - free shared block
382 * @sep: security processor
384 static void sep_unmap_and_free_shared_area(struct sep_device
*sep
)
386 dma_free_coherent(&sep
->pdev
->dev
, sep
->shared_size
,
387 sep
->shared_addr
, sep
->shared_bus
);
393 * sep_shared_bus_to_virt - convert bus/virt addresses
394 * @sep: pointer to struct sep_device
395 * @bus_address: address to convert
397 * Returns virtual address inside the shared area according
398 * to the bus address.
400 static void *sep_shared_bus_to_virt(struct sep_device
*sep
,
401 dma_addr_t bus_address
)
403 return sep
->shared_addr
+ (bus_address
- sep
->shared_bus
);
409 * sep_open - device open method
410 * @inode: inode of SEP device
411 * @filp: file handle to SEP device
413 * Open method for the SEP device. Called when userspace opens
414 * the SEP device node.
416 * Returns zero on success otherwise an error code.
418 static int sep_open(struct inode
*inode
, struct file
*filp
)
420 struct sep_device
*sep
;
421 struct sep_private_data
*priv
;
423 dev_dbg(&sep_dev
->pdev
->dev
, "[PID%d] open\n", current
->pid
);
425 if (filp
->f_flags
& O_NONBLOCK
)
429 * Get the SEP device structure and use it for the
430 * private_data field in filp for other methods
433 priv
= kzalloc(sizeof(*priv
), GFP_KERNEL
);
439 filp
->private_data
= priv
;
441 dev_dbg(&sep_dev
->pdev
->dev
, "[PID%d] priv is 0x%p\n",
444 /* Anyone can open; locking takes place at transaction level */
449 * sep_free_dma_table_data_handler - free DMA table
450 * @sep: pointer to struct sep_device
451 * @dma_ctx: dma context
453 * Handles the request to free DMA table for synchronic actions
455 int sep_free_dma_table_data_handler(struct sep_device
*sep
,
456 struct sep_dma_context
**dma_ctx
)
460 /* Pointer to the current dma_resource struct */
461 struct sep_dma_resource
*dma
;
463 dev_dbg(&sep
->pdev
->dev
,
464 "[PID%d] sep_free_dma_table_data_handler\n",
467 if (!dma_ctx
|| !(*dma_ctx
)) {
468 /* No context or context already freed */
469 dev_dbg(&sep
->pdev
->dev
,
470 "[PID%d] no DMA context or context already freed\n",
476 dev_dbg(&sep
->pdev
->dev
, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
478 (*dma_ctx
)->nr_dcb_creat
);
480 for (dcb_counter
= 0;
481 dcb_counter
< (*dma_ctx
)->nr_dcb_creat
; dcb_counter
++) {
482 dma
= &(*dma_ctx
)->dma_res_arr
[dcb_counter
];
484 /* Unmap and free input map array */
485 if (dma
->in_map_array
) {
486 for (count
= 0; count
< dma
->in_num_pages
; count
++) {
487 dma_unmap_page(&sep
->pdev
->dev
,
488 dma
->in_map_array
[count
].dma_addr
,
489 dma
->in_map_array
[count
].size
,
492 kfree(dma
->in_map_array
);
496 * Output is handled different. If
497 * this was a secure dma into restricted memory,
498 * then we skip this step altogether as restricted
499 * memory is not available to the o/s at all.
501 if (((*dma_ctx
)->secure_dma
== false) &&
502 (dma
->out_map_array
)) {
504 for (count
= 0; count
< dma
->out_num_pages
; count
++) {
505 dma_unmap_page(&sep
->pdev
->dev
,
506 dma
->out_map_array
[count
].dma_addr
,
507 dma
->out_map_array
[count
].size
,
510 kfree(dma
->out_map_array
);
513 /* Free page cache for output */
514 if (dma
->in_page_array
) {
515 for (count
= 0; count
< dma
->in_num_pages
; count
++) {
516 flush_dcache_page(dma
->in_page_array
[count
]);
517 page_cache_release(dma
->in_page_array
[count
]);
519 kfree(dma
->in_page_array
);
522 /* Again, we do this only for non secure dma */
523 if (((*dma_ctx
)->secure_dma
== false) &&
524 (dma
->out_page_array
)) {
526 for (count
= 0; count
< dma
->out_num_pages
; count
++) {
527 if (!PageReserved(dma
->out_page_array
[count
]))
530 out_page_array
[count
]);
532 flush_dcache_page(dma
->out_page_array
[count
]);
533 page_cache_release(dma
->out_page_array
[count
]);
535 kfree(dma
->out_page_array
);
539 * Note that here we use in_map_num_entries because we
540 * don't have a page array; the page array is generated
541 * only in the lock_user_pages, which is not called
542 * for kernel crypto, which is what the sg (scatter gather
543 * is used for exclusively)
546 dma_unmap_sg(&sep
->pdev
->dev
, dma
->src_sg
,
547 dma
->in_map_num_entries
, DMA_TO_DEVICE
);
552 dma_unmap_sg(&sep
->pdev
->dev
, dma
->dst_sg
,
553 dma
->in_map_num_entries
, DMA_FROM_DEVICE
);
557 /* Reset all the values */
558 dma
->in_page_array
= NULL
;
559 dma
->out_page_array
= NULL
;
560 dma
->in_num_pages
= 0;
561 dma
->out_num_pages
= 0;
562 dma
->in_map_array
= NULL
;
563 dma
->out_map_array
= NULL
;
564 dma
->in_map_num_entries
= 0;
565 dma
->out_map_num_entries
= 0;
568 (*dma_ctx
)->nr_dcb_creat
= 0;
569 (*dma_ctx
)->num_lli_tables_created
= 0;
574 dev_dbg(&sep
->pdev
->dev
,
575 "[PID%d] sep_free_dma_table_data_handler end\n",
582 * sep_end_transaction_handler - end transaction
583 * @sep: pointer to struct sep_device
584 * @dma_ctx: DMA context
585 * @call_status: Call status
587 * This API handles the end transaction request.
589 static int sep_end_transaction_handler(struct sep_device
*sep
,
590 struct sep_dma_context
**dma_ctx
,
591 struct sep_call_status
*call_status
,
592 struct sep_queue_info
**my_queue_elem
)
594 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ending transaction\n", current
->pid
);
597 * Extraneous transaction clearing would mess up PM
598 * device usage counters and SEP would get suspended
599 * just before we send a command to SEP in the next
602 if (sep_check_transaction_owner(sep
)) {
603 dev_dbg(&sep
->pdev
->dev
, "[PID%d] not transaction owner\n",
608 /* Update queue status */
609 sep_queue_status_remove(sep
, my_queue_elem
);
611 /* Check that all the DMA resources were freed */
613 sep_free_dma_table_data_handler(sep
, dma_ctx
);
615 /* Reset call status for next transaction */
617 call_status
->status
= 0;
619 /* Clear the message area to avoid next transaction reading
620 * sensitive results from previous transaction */
621 memset(sep
->shared_addr
, 0,
622 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
);
624 /* start suspend delay */
625 #ifdef SEP_ENABLE_RUNTIME_PM
628 pm_runtime_mark_last_busy(&sep
->pdev
->dev
);
629 pm_runtime_put_autosuspend(&sep
->pdev
->dev
);
633 clear_bit(SEP_WORKING_LOCK_BIT
, &sep
->in_use_flags
);
634 sep
->pid_doing_transaction
= 0;
636 /* Now it's safe for next process to proceed */
637 dev_dbg(&sep
->pdev
->dev
, "[PID%d] waking up next transaction\n",
639 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT
, &sep
->in_use_flags
);
640 wake_up(&sep
->event_transactions
);
647 * sep_release - close a SEP device
648 * @inode: inode of SEP device
649 * @filp: file handle being closed
651 * Called on the final close of a SEP device.
653 static int sep_release(struct inode
*inode
, struct file
*filp
)
655 struct sep_private_data
* const private_data
= filp
->private_data
;
656 struct sep_call_status
*call_status
= &private_data
->call_status
;
657 struct sep_device
*sep
= private_data
->device
;
658 struct sep_dma_context
**dma_ctx
= &private_data
->dma_ctx
;
659 struct sep_queue_info
**my_queue_elem
= &private_data
->my_queue_elem
;
661 dev_dbg(&sep
->pdev
->dev
, "[PID%d] release\n", current
->pid
);
663 sep_end_transaction_handler(sep
, dma_ctx
, call_status
,
666 kfree(filp
->private_data
);
672 * sep_mmap - maps the shared area to user space
673 * @filp: pointer to struct file
674 * @vma: pointer to vm_area_struct
676 * Called on an mmap of our space via the normal SEP device
678 static int sep_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
680 struct sep_private_data
* const private_data
= filp
->private_data
;
681 struct sep_call_status
*call_status
= &private_data
->call_status
;
682 struct sep_device
*sep
= private_data
->device
;
683 struct sep_queue_info
**my_queue_elem
= &private_data
->my_queue_elem
;
685 unsigned long error
= 0;
687 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_mmap\n", current
->pid
);
689 /* Set the transaction busy (own the device) */
691 * Problem for multithreaded applications is that here we're
692 * possibly going to sleep while holding a write lock on
693 * current->mm->mmap_sem, which will cause deadlock for ongoing
694 * transaction trying to create DMA tables
696 error
= sep_wait_transaction(sep
);
698 /* Interrupted by signal, don't clear transaction */
701 /* Clear the message area to avoid next transaction reading
702 * sensitive results from previous transaction */
703 memset(sep
->shared_addr
, 0,
704 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
);
707 * Check that the size of the mapped range is as the size of the message
710 if ((vma
->vm_end
- vma
->vm_start
) > SEP_DRIVER_MMMAP_AREA_SIZE
) {
712 goto end_function_with_error
;
715 dev_dbg(&sep
->pdev
->dev
, "[PID%d] shared_addr is %p\n",
716 current
->pid
, sep
->shared_addr
);
718 /* Get bus address */
719 bus_addr
= sep
->shared_bus
;
721 if (remap_pfn_range(vma
, vma
->vm_start
, bus_addr
>> PAGE_SHIFT
,
722 vma
->vm_end
- vma
->vm_start
, vma
->vm_page_prot
)) {
723 dev_dbg(&sep
->pdev
->dev
, "[PID%d] remap_page_range failed\n",
726 goto end_function_with_error
;
729 /* Update call status */
730 set_bit(SEP_LEGACY_MMAP_DONE_OFFSET
, &call_status
->status
);
734 end_function_with_error
:
735 /* Clear our transaction */
736 sep_end_transaction_handler(sep
, NULL
, call_status
,
744 * sep_poll - poll handler
745 * @filp: pointer to struct file
746 * @wait: pointer to poll_table
748 * Called by the OS when the kernel is asked to do a poll on
751 static unsigned int sep_poll(struct file
*filp
, poll_table
*wait
)
753 struct sep_private_data
* const private_data
= filp
->private_data
;
754 struct sep_call_status
*call_status
= &private_data
->call_status
;
755 struct sep_device
*sep
= private_data
->device
;
759 unsigned long lock_irq_flag
;
761 /* Am I the process that owns the transaction? */
762 if (sep_check_transaction_owner(sep
)) {
763 dev_dbg(&sep
->pdev
->dev
, "[PID%d] poll pid not owner\n",
769 /* Check if send command or send_reply were activated previously */
770 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
771 &call_status
->status
)) {
772 dev_warn(&sep
->pdev
->dev
, "[PID%d] sendmsg not called\n",
779 /* Add the event to the polling wait table */
780 dev_dbg(&sep
->pdev
->dev
, "[PID%d] poll: calling wait sep_event\n",
783 poll_wait(filp
, &sep
->event_interrupt
, wait
);
785 dev_dbg(&sep
->pdev
->dev
,
786 "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
787 current
->pid
, sep
->send_ct
, sep
->reply_ct
);
789 /* Check if error occurred during poll */
790 retval2
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR3_REG_ADDR
);
791 if ((retval2
!= 0x0) && (retval2
!= 0x8)) {
792 dev_dbg(&sep
->pdev
->dev
, "[PID%d] poll; poll error %x\n",
793 current
->pid
, retval2
);
798 spin_lock_irqsave(&sep
->snd_rply_lck
, lock_irq_flag
);
800 if (sep
->send_ct
== sep
->reply_ct
) {
801 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lock_irq_flag
);
802 retval
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
803 dev_dbg(&sep
->pdev
->dev
,
804 "[PID%d] poll: data ready check (GPR2) %x\n",
805 current
->pid
, retval
);
807 /* Check if printf request */
808 if ((retval
>> 30) & 0x1) {
809 dev_dbg(&sep
->pdev
->dev
,
810 "[PID%d] poll: SEP printf request\n",
815 /* Check if the this is SEP reply or request */
817 dev_dbg(&sep
->pdev
->dev
,
818 "[PID%d] poll: SEP request\n",
821 dev_dbg(&sep
->pdev
->dev
,
822 "[PID%d] poll: normal return\n",
824 sep_dump_message(sep
);
825 dev_dbg(&sep
->pdev
->dev
,
826 "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
828 mask
|= POLLIN
| POLLRDNORM
;
830 set_bit(SEP_LEGACY_POLL_DONE_OFFSET
, &call_status
->status
);
832 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lock_irq_flag
);
833 dev_dbg(&sep
->pdev
->dev
,
834 "[PID%d] poll; no reply; returning mask of 0\n",
844 * sep_time_address - address in SEP memory of time
845 * @sep: SEP device we want the address from
847 * Return the address of the two dwords in memory used for time
850 static u32
*sep_time_address(struct sep_device
*sep
)
852 return sep
->shared_addr
+
853 SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES
;
857 * sep_set_time - set the SEP time
858 * @sep: the SEP we are setting the time for
860 * Calculates time and sets it at the predefined address.
861 * Called with the SEP mutex held.
863 static unsigned long sep_set_time(struct sep_device
*sep
)
866 u32
*time_addr
; /* Address of time as seen by the kernel */
869 do_gettimeofday(&time
);
871 /* Set value in the SYSTEM MEMORY offset */
872 time_addr
= sep_time_address(sep
);
874 time_addr
[0] = SEP_TIME_VAL_TOKEN
;
875 time_addr
[1] = time
.tv_sec
;
877 dev_dbg(&sep
->pdev
->dev
, "[PID%d] time.tv_sec is %lu\n",
878 current
->pid
, time
.tv_sec
);
879 dev_dbg(&sep
->pdev
->dev
, "[PID%d] time_addr is %p\n",
880 current
->pid
, time_addr
);
881 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep->shared_addr is %p\n",
882 current
->pid
, sep
->shared_addr
);
888 * sep_send_command_handler - kick off a command
889 * @sep: SEP being signalled
891 * This function raises interrupt to SEP that signals that is has a new
892 * command from the host
894 * Note that this function does fall under the ioctl lock
896 int sep_send_command_handler(struct sep_device
*sep
)
898 unsigned long lock_irq_flag
;
902 /* Basic sanity check; set msg pool to start of shared area */
903 msg_pool
= (u32
*)sep
->shared_addr
;
906 /* Look for start msg token */
907 if (*msg_pool
!= SEP_START_MSG_TOKEN
) {
908 dev_warn(&sep
->pdev
->dev
, "start message token not present\n");
913 /* Do we have a reasonable size? */
915 if ((*msg_pool
< 2) ||
916 (*msg_pool
> SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES
)) {
918 dev_warn(&sep
->pdev
->dev
, "invalid message size\n");
923 /* Does the command look reasonable? */
926 dev_warn(&sep
->pdev
->dev
, "invalid message opcode\n");
931 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
932 dev_dbg(&sep
->pdev
->dev
, "[PID%d] before pm sync status 0x%X\n",
934 sep
->pdev
->dev
.power
.runtime_status
);
935 sep
->in_use
= 1; /* device is about to be used */
936 pm_runtime_get_sync(&sep
->pdev
->dev
);
939 if (test_and_set_bit(SEP_WORKING_LOCK_BIT
, &sep
->in_use_flags
)) {
943 sep
->in_use
= 1; /* device is about to be used */
946 sep_dump_message(sep
);
949 spin_lock_irqsave(&sep
->snd_rply_lck
, lock_irq_flag
);
951 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lock_irq_flag
);
953 dev_dbg(&sep
->pdev
->dev
,
954 "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
955 current
->pid
, sep
->send_ct
, sep
->reply_ct
);
957 /* Send interrupt to SEP */
958 sep_write_reg(sep
, HW_HOST_HOST_SEP_GPR0_REG_ADDR
, 0x2);
966 * @sep: pointer to struct sep_device
967 * @sg: pointer to struct scatterlist
969 * @dma_maps: pointer to place a pointer to array of dma maps
970 * This is filled in; anything previous there will be lost
971 * The structure for dma maps is sep_dma_map
972 * @returns number of dma maps on success; negative on error
974 * This creates the dma table from the scatterlist
975 * It is used only for kernel crypto as it works with scatterlists
976 * representation of data buffers
979 static int sep_crypto_dma(
980 struct sep_device
*sep
,
981 struct scatterlist
*sg
,
982 struct sep_dma_map
**dma_maps
,
983 enum dma_data_direction direction
)
985 struct scatterlist
*temp_sg
;
989 struct sep_dma_map
*sep_dma
;
995 /* Count the segments */
1000 temp_sg
= scatterwalk_sg_next(temp_sg
);
1002 dev_dbg(&sep
->pdev
->dev
,
1003 "There are (hex) %x segments in sg\n", count_segment
);
1005 /* DMA map segments */
1006 count_mapped
= dma_map_sg(&sep
->pdev
->dev
, sg
,
1007 count_segment
, direction
);
1009 dev_dbg(&sep
->pdev
->dev
,
1010 "There are (hex) %x maps in sg\n", count_mapped
);
1012 if (count_mapped
== 0) {
1013 dev_dbg(&sep
->pdev
->dev
, "Cannot dma_map_sg\n");
1017 sep_dma
= kmalloc(sizeof(struct sep_dma_map
) *
1018 count_mapped
, GFP_ATOMIC
);
1020 if (sep_dma
== NULL
) {
1021 dev_dbg(&sep
->pdev
->dev
, "Cannot allocate dma_maps\n");
1025 for_each_sg(sg
, temp_sg
, count_mapped
, ct1
) {
1026 sep_dma
[ct1
].dma_addr
= sg_dma_address(temp_sg
);
1027 sep_dma
[ct1
].size
= sg_dma_len(temp_sg
);
1028 dev_dbg(&sep
->pdev
->dev
, "(all hex) map %x dma %lx len %lx\n",
1029 ct1
, (unsigned long)sep_dma
[ct1
].dma_addr
,
1030 (unsigned long)sep_dma
[ct1
].size
);
1033 *dma_maps
= sep_dma
;
1034 return count_mapped
;
1040 * @sep: pointer to struct sep_device
1041 * @sg: pointer to struct scatterlist
1042 * @data_size: total data size
1044 * @dma_maps: pointer to place a pointer to array of dma maps
1045 * This is filled in; anything previous there will be lost
1046 * The structure for dma maps is sep_dma_map
1047 * @lli_maps: pointer to place a pointer to array of lli maps
1048 * This is filled in; anything previous there will be lost
1049 * The structure for dma maps is sep_dma_map
1050 * @returns number of dma maps on success; negative on error
1052 * This creates the LLI table from the scatterlist
1053 * It is only used for kernel crypto as it works exclusively
1054 * with scatterlists (struct scatterlist) representation of
1057 static int sep_crypto_lli(
1058 struct sep_device
*sep
,
1059 struct scatterlist
*sg
,
1060 struct sep_dma_map
**maps
,
1061 struct sep_lli_entry
**llis
,
1063 enum dma_data_direction direction
)
1067 struct sep_lli_entry
*sep_lli
;
1068 struct sep_dma_map
*sep_map
;
1072 nbr_ents
= sep_crypto_dma(sep
, sg
, maps
, direction
);
1073 if (nbr_ents
<= 0) {
1074 dev_dbg(&sep
->pdev
->dev
, "crypto_dma failed %x\n",
1081 sep_lli
= kmalloc(sizeof(struct sep_lli_entry
) * nbr_ents
, GFP_ATOMIC
);
1083 if (sep_lli
== NULL
) {
1084 dev_dbg(&sep
->pdev
->dev
, "Cannot allocate lli_maps\n");
1091 for (ct1
= 0; ct1
< nbr_ents
; ct1
+= 1) {
1092 sep_lli
[ct1
].bus_address
= (u32
)sep_map
[ct1
].dma_addr
;
1094 /* Maximum for page is total data size */
1095 if (sep_map
[ct1
].size
> data_size
)
1096 sep_map
[ct1
].size
= data_size
;
1098 sep_lli
[ct1
].block_size
= (u32
)sep_map
[ct1
].size
;
1106 * sep_lock_kernel_pages - map kernel pages for DMA
1107 * @sep: pointer to struct sep_device
1108 * @kernel_virt_addr: address of data buffer in kernel
1109 * @data_size: size of data
1110 * @lli_array_ptr: lli array
1111 * @in_out_flag: input into device or output from device
1113 * This function locks all the physical pages of the kernel virtual buffer
1114 * and construct a basic lli array, where each entry holds the physical
1115 * page address and the size that application data holds in this page
1116 * This function is used only during kernel crypto mod calls from within
1117 * the kernel (when ioctl is not used)
1119 * This is used only for kernel crypto. Kernel pages
1120 * are handled differently as they are done via
1121 * scatter gather lists (struct scatterlist)
1123 static int sep_lock_kernel_pages(struct sep_device
*sep
,
1124 unsigned long kernel_virt_addr
,
1126 struct sep_lli_entry
**lli_array_ptr
,
1128 struct sep_dma_context
*dma_ctx
)
1132 struct scatterlist
*sg
;
1135 struct sep_lli_entry
*lli_array
;
1137 struct sep_dma_map
*map_array
;
1139 enum dma_data_direction direction
;
1144 if (in_out_flag
== SEP_DRIVER_IN_FLAG
) {
1145 direction
= DMA_TO_DEVICE
;
1146 sg
= dma_ctx
->src_sg
;
1148 direction
= DMA_FROM_DEVICE
;
1149 sg
= dma_ctx
->dst_sg
;
1152 num_pages
= sep_crypto_lli(sep
, sg
, &map_array
, &lli_array
,
1153 data_size
, direction
);
1155 if (num_pages
<= 0) {
1156 dev_dbg(&sep
->pdev
->dev
, "sep_crypto_lli returned error %x\n",
1161 /* Put mapped kernel sg into kernel resource array */
1163 /* Set output params according to the in_out flag */
1164 if (in_out_flag
== SEP_DRIVER_IN_FLAG
) {
1165 *lli_array_ptr
= lli_array
;
1166 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
=
1168 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
=
1170 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
=
1172 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_num_entries
=
1174 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].src_sg
=
1177 *lli_array_ptr
= lli_array
;
1178 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_num_pages
=
1180 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
=
1182 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
=
1184 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].
1185 out_map_num_entries
= num_pages
;
1186 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].dst_sg
=
1194 * sep_lock_user_pages - lock and map user pages for DMA
1195 * @sep: pointer to struct sep_device
1196 * @app_virt_addr: user memory data buffer
1197 * @data_size: size of data buffer
1198 * @lli_array_ptr: lli array
1199 * @in_out_flag: input or output to device
1201 * This function locks all the physical pages of the application
1202 * virtual buffer and construct a basic lli array, where each entry
1203 * holds the physical page address and the size that application
1204 * data holds in this physical pages
1206 static int sep_lock_user_pages(struct sep_device
*sep
,
1209 struct sep_lli_entry
**lli_array_ptr
,
1211 struct sep_dma_context
*dma_ctx
)
1217 /* The the page of the end address of the user space buffer */
1219 /* The page of the start address of the user space buffer */
1221 /* The range in pages */
1223 /* Array of pointers to page */
1224 struct page
**page_array
;
1226 struct sep_lli_entry
*lli_array
;
1228 struct sep_dma_map
*map_array
;
1230 /* Set start and end pages and num pages */
1231 end_page
= (app_virt_addr
+ data_size
- 1) >> PAGE_SHIFT
;
1232 start_page
= app_virt_addr
>> PAGE_SHIFT
;
1233 num_pages
= end_page
- start_page
+ 1;
1235 dev_dbg(&sep
->pdev
->dev
,
1236 "[PID%d] lock user pages app_virt_addr is %x\n",
1237 current
->pid
, app_virt_addr
);
1239 dev_dbg(&sep
->pdev
->dev
, "[PID%d] data_size is (hex) %x\n",
1240 current
->pid
, data_size
);
1241 dev_dbg(&sep
->pdev
->dev
, "[PID%d] start_page is (hex) %x\n",
1242 current
->pid
, start_page
);
1243 dev_dbg(&sep
->pdev
->dev
, "[PID%d] end_page is (hex) %x\n",
1244 current
->pid
, end_page
);
1245 dev_dbg(&sep
->pdev
->dev
, "[PID%d] num_pages is (hex) %x\n",
1246 current
->pid
, num_pages
);
1248 /* Allocate array of pages structure pointers */
1249 page_array
= kmalloc(sizeof(struct page
*) * num_pages
, GFP_ATOMIC
);
1254 map_array
= kmalloc(sizeof(struct sep_dma_map
) * num_pages
, GFP_ATOMIC
);
1256 dev_warn(&sep
->pdev
->dev
,
1257 "[PID%d] kmalloc for map_array failed\n",
1260 goto end_function_with_error1
;
1263 lli_array
= kmalloc(sizeof(struct sep_lli_entry
) * num_pages
,
1267 dev_warn(&sep
->pdev
->dev
,
1268 "[PID%d] kmalloc for lli_array failed\n",
1271 goto end_function_with_error2
;
1274 /* Convert the application virtual address into a set of physical */
1275 down_read(¤t
->mm
->mmap_sem
);
1276 result
= get_user_pages(current
, current
->mm
, app_virt_addr
,
1278 ((in_out_flag
== SEP_DRIVER_IN_FLAG
) ? 0 : 1),
1279 0, page_array
, NULL
);
1281 up_read(¤t
->mm
->mmap_sem
);
1283 /* Check the number of pages locked - if not all then exit with error */
1284 if (result
!= num_pages
) {
1285 dev_warn(&sep
->pdev
->dev
,
1286 "[PID%d] not all pages locked by get_user_pages, "
1287 "result 0x%X, num_pages 0x%X\n",
1288 current
->pid
, result
, num_pages
);
1290 goto end_function_with_error3
;
1293 dev_dbg(&sep
->pdev
->dev
, "[PID%d] get_user_pages succeeded\n",
1297 * Fill the array using page array data and
1298 * map the pages - this action will also flush the cache as needed
1300 for (count
= 0; count
< num_pages
; count
++) {
1301 /* Fill the map array */
1302 map_array
[count
].dma_addr
=
1303 dma_map_page(&sep
->pdev
->dev
, page_array
[count
],
1304 0, PAGE_SIZE
, DMA_BIDIRECTIONAL
);
1306 map_array
[count
].size
= PAGE_SIZE
;
1308 /* Fill the lli array entry */
1309 lli_array
[count
].bus_address
= (u32
)map_array
[count
].dma_addr
;
1310 lli_array
[count
].block_size
= PAGE_SIZE
;
1312 dev_dbg(&sep
->pdev
->dev
,
1313 "[PID%d] lli_array[%x].bus_address is %08lx, "
1314 "lli_array[%x].block_size is (hex) %x\n", current
->pid
,
1315 count
, (unsigned long)lli_array
[count
].bus_address
,
1316 count
, lli_array
[count
].block_size
);
1319 /* Check the offset for the first page */
1320 lli_array
[0].bus_address
=
1321 lli_array
[0].bus_address
+ (app_virt_addr
& (~PAGE_MASK
));
1323 /* Check that not all the data is in the first page only */
1324 if ((PAGE_SIZE
- (app_virt_addr
& (~PAGE_MASK
))) >= data_size
)
1325 lli_array
[0].block_size
= data_size
;
1327 lli_array
[0].block_size
=
1328 PAGE_SIZE
- (app_virt_addr
& (~PAGE_MASK
));
1330 dev_dbg(&sep
->pdev
->dev
,
1331 "[PID%d] After check if page 0 has all data\n",
1333 dev_dbg(&sep
->pdev
->dev
,
1334 "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
1335 "lli_array[0].block_size is (hex) %x\n",
1337 (unsigned long)lli_array
[0].bus_address
,
1338 lli_array
[0].block_size
);
1341 /* Check the size of the last page */
1342 if (num_pages
> 1) {
1343 lli_array
[num_pages
- 1].block_size
=
1344 (app_virt_addr
+ data_size
) & (~PAGE_MASK
);
1345 if (lli_array
[num_pages
- 1].block_size
== 0)
1346 lli_array
[num_pages
- 1].block_size
= PAGE_SIZE
;
1348 dev_dbg(&sep
->pdev
->dev
,
1349 "[PID%d] After last page size adjustment\n",
1351 dev_dbg(&sep
->pdev
->dev
,
1352 "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
1353 "lli_array[%x].block_size is (hex) %x\n",
1356 (unsigned long)lli_array
[num_pages
- 1].bus_address
,
1358 lli_array
[num_pages
- 1].block_size
);
1361 /* Set output params according to the in_out flag */
1362 if (in_out_flag
== SEP_DRIVER_IN_FLAG
) {
1363 *lli_array_ptr
= lli_array
;
1364 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
=
1366 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
=
1368 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
=
1370 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_num_entries
=
1372 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].src_sg
= NULL
;
1374 *lli_array_ptr
= lli_array
;
1375 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_num_pages
=
1377 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
=
1379 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
=
1381 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].
1382 out_map_num_entries
= num_pages
;
1383 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].dst_sg
= NULL
;
1387 end_function_with_error3
:
1388 /* Free lli array */
1391 end_function_with_error2
:
1394 end_function_with_error1
:
1395 /* Free page array */
1403 * sep_lli_table_secure_dma - get lli array for IMR addresses
1404 * @sep: pointer to struct sep_device
1405 * @app_virt_addr: user memory data buffer
1406 * @data_size: size of data buffer
1407 * @lli_array_ptr: lli array
1408 * @in_out_flag: not used
1409 * @dma_ctx: pointer to struct sep_dma_context
1411 * This function creates lli tables for outputting data to
1412 * IMR memory, which is memory that cannot be accessed by the
1413 * x86 processor.
1415 static int sep_lli_table_secure_dma(struct sep_device
*sep
,
1418 struct sep_lli_entry
**lli_array_ptr
,
1420 struct sep_dma_context
*dma_ctx
)
1425 /* The the page of the end address of the user space buffer */
1427 /* The page of the start address of the user space buffer */
1429 /* The range in pages */
1432 struct sep_lli_entry
*lli_array
;
1434 /* Set start and end pages and num pages */
1435 end_page
= (app_virt_addr
+ data_size
- 1) >> PAGE_SHIFT
;
1436 start_page
= app_virt_addr
>> PAGE_SHIFT
;
1437 num_pages
= end_page
- start_page
+ 1;
1439 dev_dbg(&sep
->pdev
->dev
,
1440 "[PID%d] lock user pages app_virt_addr is %x\n",
1441 current
->pid
, app_virt_addr
);
1443 dev_dbg(&sep
->pdev
->dev
, "[PID%d] data_size is (hex) %x\n",
1444 current
->pid
, data_size
);
1445 dev_dbg(&sep
->pdev
->dev
, "[PID%d] start_page is (hex) %x\n",
1446 current
->pid
, start_page
);
1447 dev_dbg(&sep
->pdev
->dev
, "[PID%d] end_page is (hex) %x\n",
1448 current
->pid
, end_page
);
1449 dev_dbg(&sep
->pdev
->dev
, "[PID%d] num_pages is (hex) %x\n",
1450 current
->pid
, num_pages
);
1452 lli_array
= kmalloc(sizeof(struct sep_lli_entry
) * num_pages
,
1456 dev_warn(&sep
->pdev
->dev
,
1457 "[PID%d] kmalloc for lli_array failed\n",
1463 * Fill the lli_array
1465 start_page
= start_page
<< PAGE_SHIFT
;
1466 for (count
= 0; count
< num_pages
; count
++) {
1467 /* Fill the lli array entry */
1468 lli_array
[count
].bus_address
= start_page
;
1469 lli_array
[count
].block_size
= PAGE_SIZE
;
1471 start_page
+= PAGE_SIZE
;
1473 dev_dbg(&sep
->pdev
->dev
,
1474 "[PID%d] lli_array[%x].bus_address is %08lx, "
1475 "lli_array[%x].block_size is (hex) %x\n",
1477 count
, (unsigned long)lli_array
[count
].bus_address
,
1478 count
, lli_array
[count
].block_size
);
1481 /* Check the offset for the first page */
1482 lli_array
[0].bus_address
=
1483 lli_array
[0].bus_address
+ (app_virt_addr
& (~PAGE_MASK
));
1485 /* Check that not all the data is in the first page only */
1486 if ((PAGE_SIZE
- (app_virt_addr
& (~PAGE_MASK
))) >= data_size
)
1487 lli_array
[0].block_size
= data_size
;
1489 lli_array
[0].block_size
=
1490 PAGE_SIZE
- (app_virt_addr
& (~PAGE_MASK
));
1492 dev_dbg(&sep
->pdev
->dev
,
1493 "[PID%d] After check if page 0 has all data\n"
1494 "lli_array[0].bus_address is (hex) %08lx, "
1495 "lli_array[0].block_size is (hex) %x\n",
1497 (unsigned long)lli_array
[0].bus_address
,
1498 lli_array
[0].block_size
);
1500 /* Check the size of the last page */
1501 if (num_pages
> 1) {
1502 lli_array
[num_pages
- 1].block_size
=
1503 (app_virt_addr
+ data_size
) & (~PAGE_MASK
);
1504 if (lli_array
[num_pages
- 1].block_size
== 0)
1505 lli_array
[num_pages
- 1].block_size
= PAGE_SIZE
;
1507 dev_dbg(&sep
->pdev
->dev
,
1508 "[PID%d] After last page size adjustment\n"
1509 "lli_array[%x].bus_address is (hex) %08lx, "
1510 "lli_array[%x].block_size is (hex) %x\n",
1511 current
->pid
, num_pages
- 1,
1512 (unsigned long)lli_array
[num_pages
- 1].bus_address
,
1514 lli_array
[num_pages
- 1].block_size
);
1516 *lli_array_ptr
= lli_array
;
1517 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_num_pages
= num_pages
;
1518 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
= NULL
;
1519 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
= NULL
;
1520 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_num_entries
= 0;
1526 * sep_calculate_lli_table_max_size - size the LLI table
1527 * @sep: pointer to struct sep_device
1529 * @num_array_entries
1532 * This function calculates the size of data that can be inserted into
1533 * the lli table from this array, such that either the table is full
1534 * (all entries are entered), or there are no more entries in the
1537 static u32
sep_calculate_lli_table_max_size(struct sep_device
*sep
,
1538 struct sep_lli_entry
*lli_in_array_ptr
,
1539 u32 num_array_entries
,
1540 u32
*last_table_flag
)
1543 /* Table data size */
1544 u32 table_data_size
= 0;
1545 /* Data size for the next table */
1546 u32 next_table_data_size
;
1548 *last_table_flag
= 0;
1551 * Calculate the data in the out lli table till we fill the whole
1552 * table or till the data has ended
1555 (counter
< (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
- 1)) &&
1556 (counter
< num_array_entries
); counter
++)
1557 table_data_size
+= lli_in_array_ptr
[counter
].block_size
;
1560 * Check if we reached the last entry,
1561 * meaning this ia the last table to build,
1562 * and no need to check the block alignment
1564 if (counter
== num_array_entries
) {
1565 /* Set the last table flag */
1566 *last_table_flag
= 1;
1571 * Calculate the data size of the next table.
1572 * Stop if no entries left or if data size is more the DMA restriction
1574 next_table_data_size
= 0;
1575 for (; counter
< num_array_entries
; counter
++) {
1576 next_table_data_size
+= lli_in_array_ptr
[counter
].block_size
;
1577 if (next_table_data_size
>= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
)
1582 * Check if the next table data size is less then DMA rstriction.
1583 * if it is - recalculate the current table size, so that the next
1584 * table data size will be adaquete for DMA
1586 if (next_table_data_size
&&
1587 next_table_data_size
< SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
)
1589 table_data_size
-= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
-
1590 next_table_data_size
);
1593 return table_data_size
;
1597 * sep_build_lli_table - build an lli array for the given table
1598 * @sep: pointer to struct sep_device
1599 * @lli_array_ptr: pointer to lli array
1600 * @lli_table_ptr: pointer to lli table
1601 * @num_processed_entries_ptr: pointer to number of entries
1602 * @num_table_entries_ptr: pointer to number of tables
1603 * @table_data_size: total data size
1605 * Builds an lli table from the lli_array according to
1606 * the given size of data
1608 static void sep_build_lli_table(struct sep_device
*sep
,
1609 struct sep_lli_entry
*lli_array_ptr
,
1610 struct sep_lli_entry
*lli_table_ptr
,
1611 u32
*num_processed_entries_ptr
,
1612 u32
*num_table_entries_ptr
,
1613 u32 table_data_size
)
1615 /* Current table data size */
1616 u32 curr_table_data_size
;
1617 /* Counter of lli array entry */
1620 /* Init current table data size and lli array entry counter */
1621 curr_table_data_size
= 0;
1623 *num_table_entries_ptr
= 1;
1625 dev_dbg(&sep
->pdev
->dev
,
1626 "[PID%d] build lli table table_data_size: (hex) %x\n",
1627 current
->pid
, table_data_size
);
1629 /* Fill the table till table size reaches the needed amount */
1630 while (curr_table_data_size
< table_data_size
) {
1631 /* Update the number of entries in table */
1632 (*num_table_entries_ptr
)++;
1634 lli_table_ptr
->bus_address
=
1635 cpu_to_le32(lli_array_ptr
[array_counter
].bus_address
);
1637 lli_table_ptr
->block_size
=
1638 cpu_to_le32(lli_array_ptr
[array_counter
].block_size
);
1640 curr_table_data_size
+= lli_array_ptr
[array_counter
].block_size
;
1642 dev_dbg(&sep
->pdev
->dev
,
1643 "[PID%d] lli_table_ptr is %p\n",
1644 current
->pid
, lli_table_ptr
);
1645 dev_dbg(&sep
->pdev
->dev
,
1646 "[PID%d] lli_table_ptr->bus_address: %08lx\n",
1648 (unsigned long)lli_table_ptr
->bus_address
);
1650 dev_dbg(&sep
->pdev
->dev
,
1651 "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1652 current
->pid
, lli_table_ptr
->block_size
);
1654 /* Check for overflow of the table data */
1655 if (curr_table_data_size
> table_data_size
) {
1656 dev_dbg(&sep
->pdev
->dev
,
1657 "[PID%d] curr_table_data_size too large\n",
1660 /* Update the size of block in the table */
1661 lli_table_ptr
->block_size
=
1662 cpu_to_le32(lli_table_ptr
->block_size
) -
1663 (curr_table_data_size
- table_data_size
);
1665 /* Update the physical address in the lli array */
1666 lli_array_ptr
[array_counter
].bus_address
+=
1667 cpu_to_le32(lli_table_ptr
->block_size
);
1669 /* Update the block size left in the lli array */
1670 lli_array_ptr
[array_counter
].block_size
=
1671 (curr_table_data_size
- table_data_size
);
1673 /* Advance to the next entry in the lli_array */
1676 dev_dbg(&sep
->pdev
->dev
,
1677 "[PID%d] lli_table_ptr->bus_address is %08lx\n",
1679 (unsigned long)lli_table_ptr
->bus_address
);
1680 dev_dbg(&sep
->pdev
->dev
,
1681 "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1683 lli_table_ptr
->block_size
);
1685 /* Move to the next entry in table */
1689 /* Set the info entry to default */
1690 lli_table_ptr
->bus_address
= 0xffffffff;
1691 lli_table_ptr
->block_size
= 0;
1693 /* Set the output parameter */
1694 *num_processed_entries_ptr
+= array_counter
;
1699 * sep_shared_area_virt_to_bus - map shared area to bus address
1700 * @sep: pointer to struct sep_device
1701 * @virt_address: virtual address to convert
1703 * This functions returns the physical address inside shared area according
1704 * to the virtual address. It can be either on the external RAM device
1705 * (ioremapped), or on the system RAM
1706 * This implementation is for the external RAM
1708 static dma_addr_t
sep_shared_area_virt_to_bus(struct sep_device
*sep
,
1711 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sh virt to phys v %p\n",
1712 current
->pid
, virt_address
);
1713 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sh virt to phys p %08lx\n",
1716 sep
->shared_bus
+ (virt_address
- sep
->shared_addr
));
1718 return sep
->shared_bus
+ (size_t)(virt_address
- sep
->shared_addr
);
1722 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1723 * @sep: pointer to struct sep_device
1724 * @bus_address: bus address to convert
1726 * This functions returns the virtual address inside shared area
1727 * according to the physical address. It can be either on the
1728 * external RAM device (ioremapped), or on the system RAM
1729 * This implementation is for the external RAM
1731 static void *sep_shared_area_bus_to_virt(struct sep_device
*sep
,
1732 dma_addr_t bus_address
)
1734 dev_dbg(&sep
->pdev
->dev
, "[PID%d] shared bus to virt b=%lx v=%lx\n",
1736 (unsigned long)bus_address
, (unsigned long)(sep
->shared_addr
+
1737 (size_t)(bus_address
- sep
->shared_bus
)));
1739 return sep
->shared_addr
+ (size_t)(bus_address
- sep
->shared_bus
);
1743 * sep_debug_print_lli_tables - dump LLI table
1744 * @sep: pointer to struct sep_device
1745 * @lli_table_ptr: pointer to sep_lli_entry
1746 * @num_table_entries: number of entries
1747 * @table_data_size: total data size
1749 * Walk the list of created tables and print all the data
1751 static void sep_debug_print_lli_tables(struct sep_device
*sep
,
1752 struct sep_lli_entry
*lli_table_ptr
,
1753 unsigned long num_table_entries
,
1754 unsigned long table_data_size
)
1757 unsigned long table_count
= 1;
1758 unsigned long entries_count
= 0;
1760 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_debug_print_lli_tables start\n",
1762 if (num_table_entries
== 0) {
1763 dev_dbg(&sep
->pdev
->dev
, "[PID%d] no table to print\n",
1768 while ((unsigned long) lli_table_ptr
->bus_address
!= 0xffffffff) {
1769 dev_dbg(&sep
->pdev
->dev
,
1770 "[PID%d] lli table %08lx, "
1771 "table_data_size is (hex) %lx\n",
1772 current
->pid
, table_count
, table_data_size
);
1773 dev_dbg(&sep
->pdev
->dev
,
1774 "[PID%d] num_table_entries is (hex) %lx\n",
1775 current
->pid
, num_table_entries
);
1777 /* Print entries of the table (without info entry) */
1778 for (entries_count
= 0; entries_count
< num_table_entries
;
1779 entries_count
++, lli_table_ptr
++) {
1781 dev_dbg(&sep
->pdev
->dev
,
1782 "[PID%d] lli_table_ptr address is %08lx\n",
1784 (unsigned long) lli_table_ptr
);
1786 dev_dbg(&sep
->pdev
->dev
,
1787 "[PID%d] phys address is %08lx "
1788 "block size is (hex) %x\n", current
->pid
,
1789 (unsigned long)lli_table_ptr
->bus_address
,
1790 lli_table_ptr
->block_size
);
1793 /* Point to the info entry */
1796 dev_dbg(&sep
->pdev
->dev
,
1797 "[PID%d] phys lli_table_ptr->block_size "
1800 lli_table_ptr
->block_size
);
1802 dev_dbg(&sep
->pdev
->dev
,
1803 "[PID%d] phys lli_table_ptr->physical_address "
1806 (unsigned long)lli_table_ptr
->bus_address
);
1809 table_data_size
= lli_table_ptr
->block_size
& 0xffffff;
1810 num_table_entries
= (lli_table_ptr
->block_size
>> 24) & 0xff;
1812 dev_dbg(&sep
->pdev
->dev
,
1813 "[PID%d] phys table_data_size is "
1814 "(hex) %lx num_table_entries is"
1815 " %lx bus_address is%lx\n",
1819 (unsigned long)lli_table_ptr
->bus_address
);
1821 if ((unsigned long)lli_table_ptr
->bus_address
!= 0xffffffff)
1822 lli_table_ptr
= (struct sep_lli_entry
*)
1823 sep_shared_bus_to_virt(sep
,
1824 (unsigned long)lli_table_ptr
->bus_address
);
1828 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_debug_print_lli_tables end\n",
1835 * sep_prepare_empty_lli_table - create a blank LLI table
1836 * @sep: pointer to struct sep_device
1837 * @lli_table_addr_ptr: pointer to lli table
1838 * @num_entries_ptr: pointer to number of entries
1839 * @table_data_size_ptr: point to table data size
1840 * @dmatables_region: Optional buffer for DMA tables
1841 * @dma_ctx: DMA context
1843 * This function creates empty lli tables when there is no data
1845 static void sep_prepare_empty_lli_table(struct sep_device
*sep
,
1846 dma_addr_t
*lli_table_addr_ptr
,
1847 u32
*num_entries_ptr
,
1848 u32
*table_data_size_ptr
,
1849 void **dmatables_region
,
1850 struct sep_dma_context
*dma_ctx
)
1852 struct sep_lli_entry
*lli_table_ptr
;
1854 /* Find the area for new table */
1856 (struct sep_lli_entry
*)(sep
->shared_addr
+
1857 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1858 dma_ctx
->num_lli_tables_created
* sizeof(struct sep_lli_entry
) *
1859 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
);
1861 if (dmatables_region
&& *dmatables_region
)
1862 lli_table_ptr
= *dmatables_region
;
1864 lli_table_ptr
->bus_address
= 0;
1865 lli_table_ptr
->block_size
= 0;
1868 lli_table_ptr
->bus_address
= 0xFFFFFFFF;
1869 lli_table_ptr
->block_size
= 0;
1871 /* Set the output parameter value */
1872 *lli_table_addr_ptr
= sep
->shared_bus
+
1873 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1874 dma_ctx
->num_lli_tables_created
*
1875 sizeof(struct sep_lli_entry
) *
1876 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
1878 /* Set the num of entries and table data size for empty table */
1879 *num_entries_ptr
= 2;
1880 *table_data_size_ptr
= 0;
1882 /* Update the number of created tables */
1883 dma_ctx
->num_lli_tables_created
++;
1887 * sep_prepare_input_dma_table - prepare input DMA mappings
1888 * @sep: pointer to struct sep_device
1893 * @table_data_size_ptr:
1894 * @is_kva: set for kernel data (kernel crypt io call)
1896 * This function prepares only input DMA table for synchronic symmetric
1898 * Note that all bus addresses that are passed to the SEP
1899 * are in 32 bit format; the SEP is a 32 bit device
1901 static int sep_prepare_input_dma_table(struct sep_device
*sep
,
1902 unsigned long app_virt_addr
,
1905 dma_addr_t
*lli_table_ptr
,
1906 u32
*num_entries_ptr
,
1907 u32
*table_data_size_ptr
,
1909 void **dmatables_region
,
1910 struct sep_dma_context
*dma_ctx
1914 /* Pointer to the info entry of the table - the last entry */
1915 struct sep_lli_entry
*info_entry_ptr
;
1916 /* Array of pointers to page */
1917 struct sep_lli_entry
*lli_array_ptr
;
1918 /* Points to the first entry to be processed in the lli_in_array */
1919 u32 current_entry
= 0;
1920 /* Num entries in the virtual buffer */
1921 u32 sep_lli_entries
= 0;
1922 /* Lli table pointer */
1923 struct sep_lli_entry
*in_lli_table_ptr
;
1924 /* The total data in one table */
1925 u32 table_data_size
= 0;
1926 /* Flag for last table */
1927 u32 last_table_flag
= 0;
1928 /* Number of entries in lli table */
1929 u32 num_entries_in_table
= 0;
1930 /* Next table address */
1931 void *lli_table_alloc_addr
= NULL
;
1932 void *dma_lli_table_alloc_addr
= NULL
;
1933 void *dma_in_lli_table_ptr
= NULL
;
1935 dev_dbg(&sep
->pdev
->dev
,
1936 "[PID%d] prepare intput dma tbl data size: (hex) %x\n",
1937 current
->pid
, data_size
);
1939 dev_dbg(&sep
->pdev
->dev
, "[PID%d] block_size is (hex) %x\n",
1940 current
->pid
, block_size
);
1942 /* Initialize the pages pointers */
1943 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
= NULL
;
1944 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
= 0;
1946 /* Set the kernel address for first table to be allocated */
1947 lli_table_alloc_addr
= (void *)(sep
->shared_addr
+
1948 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1949 dma_ctx
->num_lli_tables_created
* sizeof(struct sep_lli_entry
) *
1950 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
);
1952 if (data_size
== 0) {
1953 if (dmatables_region
) {
1954 error
= sep_allocate_dmatables_region(sep
,
1961 /* Special case - create meptu table - 2 entries, zero data */
1962 sep_prepare_empty_lli_table(sep
, lli_table_ptr
,
1963 num_entries_ptr
, table_data_size_ptr
,
1964 dmatables_region
, dma_ctx
);
1965 goto update_dcb_counter
;
1968 /* Check if the pages are in Kernel Virtual Address layout */
1970 error
= sep_lock_kernel_pages(sep
, app_virt_addr
,
1971 data_size
, &lli_array_ptr
, SEP_DRIVER_IN_FLAG
,
1975 * Lock the pages of the user buffer
1976 * and translate them to pages
1978 error
= sep_lock_user_pages(sep
, app_virt_addr
,
1979 data_size
, &lli_array_ptr
, SEP_DRIVER_IN_FLAG
,
1985 dev_dbg(&sep
->pdev
->dev
,
1986 "[PID%d] output sep_in_num_pages is (hex) %x\n",
1988 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
);
1991 info_entry_ptr
= NULL
;
1994 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
;
1996 dma_lli_table_alloc_addr
= lli_table_alloc_addr
;
1997 if (dmatables_region
) {
1998 error
= sep_allocate_dmatables_region(sep
,
2004 lli_table_alloc_addr
= *dmatables_region
;
2007 /* Loop till all the entries in in array are processed */
2008 while (current_entry
< sep_lli_entries
) {
2010 /* Set the new input and output tables */
2012 (struct sep_lli_entry
*)lli_table_alloc_addr
;
2013 dma_in_lli_table_ptr
=
2014 (struct sep_lli_entry
*)dma_lli_table_alloc_addr
;
2016 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2017 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2018 dma_lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2019 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2021 if (dma_lli_table_alloc_addr
>
2022 ((void *)sep
->shared_addr
+
2023 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
2024 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
)) {
2027 goto end_function_error
;
2031 /* Update the number of created tables */
2032 dma_ctx
->num_lli_tables_created
++;
2034 /* Calculate the maximum size of data for input table */
2035 table_data_size
= sep_calculate_lli_table_max_size(sep
,
2036 &lli_array_ptr
[current_entry
],
2037 (sep_lli_entries
- current_entry
),
2041 * If this is not the last table -
2042 * then align it to the block size
2044 if (!last_table_flag
)
2046 (table_data_size
/ block_size
) * block_size
;
2048 dev_dbg(&sep
->pdev
->dev
,
2049 "[PID%d] output table_data_size is (hex) %x\n",
2053 /* Construct input lli table */
2054 sep_build_lli_table(sep
, &lli_array_ptr
[current_entry
],
2056 ¤t_entry
, &num_entries_in_table
, table_data_size
);
2058 if (info_entry_ptr
== NULL
) {
2060 /* Set the output parameters to physical addresses */
2061 *lli_table_ptr
= sep_shared_area_virt_to_bus(sep
,
2062 dma_in_lli_table_ptr
);
2063 *num_entries_ptr
= num_entries_in_table
;
2064 *table_data_size_ptr
= table_data_size
;
2066 dev_dbg(&sep
->pdev
->dev
,
2067 "[PID%d] output lli_table_in_ptr is %08lx\n",
2069 (unsigned long)*lli_table_ptr
);
2072 /* Update the info entry of the previous in table */
2073 info_entry_ptr
->bus_address
=
2074 sep_shared_area_virt_to_bus(sep
,
2075 dma_in_lli_table_ptr
);
2076 info_entry_ptr
->block_size
=
2077 ((num_entries_in_table
) << 24) |
2080 /* Save the pointer to the info entry of the current tables */
2081 info_entry_ptr
= in_lli_table_ptr
+ num_entries_in_table
- 1;
2083 /* Print input tables */
2084 if (!dmatables_region
) {
2085 sep_debug_print_lli_tables(sep
, (struct sep_lli_entry
*)
2086 sep_shared_area_bus_to_virt(sep
, *lli_table_ptr
),
2087 *num_entries_ptr
, *table_data_size_ptr
);
2090 /* The array of the pages */
2091 kfree(lli_array_ptr
);
2094 /* Update DCB counter */
2095 dma_ctx
->nr_dcb_creat
++;
2099 /* Free all the allocated resources */
2100 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
);
2101 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
= NULL
;
2102 kfree(lli_array_ptr
);
2103 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
);
2104 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
= NULL
;
2112 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2113 * @sep: pointer to struct sep_device
2115 * @sep_in_lli_entries:
2117 * @sep_out_lli_entries
2120 * @lli_table_out_ptr
2121 * @in_num_entries_ptr
2122 * @out_num_entries_ptr
2123 * @table_data_size_ptr
2125 * This function creates the input and output DMA tables for
2126 * symmetric operations (AES/DES) according to the block
2127 * size from LLI arays
2128 * Note that all bus addresses that are passed to the SEP
2129 * are in 32 bit format; the SEP is a 32 bit device
2131 static int sep_construct_dma_tables_from_lli(
2132 struct sep_device
*sep
,
2133 struct sep_lli_entry
*lli_in_array
,
2134 u32 sep_in_lli_entries
,
2135 struct sep_lli_entry
*lli_out_array
,
2136 u32 sep_out_lli_entries
,
2138 dma_addr_t
*lli_table_in_ptr
,
2139 dma_addr_t
*lli_table_out_ptr
,
2140 u32
*in_num_entries_ptr
,
2141 u32
*out_num_entries_ptr
,
2142 u32
*table_data_size_ptr
,
2143 void **dmatables_region
,
2144 struct sep_dma_context
*dma_ctx
)
2146 /* Points to the area where next lli table can be allocated */
2147 void *lli_table_alloc_addr
= NULL
;
2149 * Points to the area in shared region where next lli table
2152 void *dma_lli_table_alloc_addr
= NULL
;
2153 /* Input lli table in dmatables_region or shared region */
2154 struct sep_lli_entry
*in_lli_table_ptr
= NULL
;
2155 /* Input lli table location in the shared region */
2156 struct sep_lli_entry
*dma_in_lli_table_ptr
= NULL
;
2157 /* Output lli table in dmatables_region or shared region */
2158 struct sep_lli_entry
*out_lli_table_ptr
= NULL
;
2159 /* Output lli table location in the shared region */
2160 struct sep_lli_entry
*dma_out_lli_table_ptr
= NULL
;
2161 /* Pointer to the info entry of the table - the last entry */
2162 struct sep_lli_entry
*info_in_entry_ptr
= NULL
;
2163 /* Pointer to the info entry of the table - the last entry */
2164 struct sep_lli_entry
*info_out_entry_ptr
= NULL
;
2165 /* Points to the first entry to be processed in the lli_in_array */
2166 u32 current_in_entry
= 0;
2167 /* Points to the first entry to be processed in the lli_out_array */
2168 u32 current_out_entry
= 0;
2169 /* Max size of the input table */
2170 u32 in_table_data_size
= 0;
2171 /* Max size of the output table */
2172 u32 out_table_data_size
= 0;
2173 /* Flag te signifies if this is the last tables build */
2174 u32 last_table_flag
= 0;
2175 /* The data size that should be in table */
2176 u32 table_data_size
= 0;
2177 /* Number of entries in the input table */
2178 u32 num_entries_in_table
= 0;
2179 /* Number of entries in the output table */
2180 u32 num_entries_out_table
= 0;
2183 dev_warn(&sep
->pdev
->dev
, "DMA context uninitialized\n");
2187 /* Initiate to point after the message area */
2188 lli_table_alloc_addr
= (void *)(sep
->shared_addr
+
2189 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
2190 (dma_ctx
->num_lli_tables_created
*
2191 (sizeof(struct sep_lli_entry
) *
2192 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
)));
2193 dma_lli_table_alloc_addr
= lli_table_alloc_addr
;
2195 if (dmatables_region
) {
2196 /* 2 for both in+out table */
2197 if (sep_allocate_dmatables_region(sep
,
2200 2*sep_in_lli_entries
))
2202 lli_table_alloc_addr
= *dmatables_region
;
2205 /* Loop till all the entries in in array are not processed */
2206 while (current_in_entry
< sep_in_lli_entries
) {
2207 /* Set the new input and output tables */
2209 (struct sep_lli_entry
*)lli_table_alloc_addr
;
2210 dma_in_lli_table_ptr
=
2211 (struct sep_lli_entry
*)dma_lli_table_alloc_addr
;
2213 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2214 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2215 dma_lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2216 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2218 /* Set the first output tables */
2220 (struct sep_lli_entry
*)lli_table_alloc_addr
;
2221 dma_out_lli_table_ptr
=
2222 (struct sep_lli_entry
*)dma_lli_table_alloc_addr
;
2224 /* Check if the DMA table area limit was overrun */
2225 if ((dma_lli_table_alloc_addr
+ sizeof(struct sep_lli_entry
) *
2226 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
) >
2227 ((void *)sep
->shared_addr
+
2228 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
2229 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
)) {
2231 dev_warn(&sep
->pdev
->dev
, "dma table limit overrun\n");
2235 /* Update the number of the lli tables created */
2236 dma_ctx
->num_lli_tables_created
+= 2;
2238 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2239 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2240 dma_lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
2241 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
2243 /* Calculate the maximum size of data for input table */
2244 in_table_data_size
=
2245 sep_calculate_lli_table_max_size(sep
,
2246 &lli_in_array
[current_in_entry
],
2247 (sep_in_lli_entries
- current_in_entry
),
2250 /* Calculate the maximum size of data for output table */
2251 out_table_data_size
=
2252 sep_calculate_lli_table_max_size(sep
,
2253 &lli_out_array
[current_out_entry
],
2254 (sep_out_lli_entries
- current_out_entry
),
2257 if (!last_table_flag
) {
2258 in_table_data_size
= (in_table_data_size
/
2259 block_size
) * block_size
;
2260 out_table_data_size
= (out_table_data_size
/
2261 block_size
) * block_size
;
2264 table_data_size
= in_table_data_size
;
2265 if (table_data_size
> out_table_data_size
)
2266 table_data_size
= out_table_data_size
;
2268 dev_dbg(&sep
->pdev
->dev
,
2269 "[PID%d] construct tables from lli"
2270 " in_table_data_size is (hex) %x\n", current
->pid
,
2271 in_table_data_size
);
2273 dev_dbg(&sep
->pdev
->dev
,
2274 "[PID%d] construct tables from lli"
2275 "out_table_data_size is (hex) %x\n", current
->pid
,
2276 out_table_data_size
);
2278 /* Construct input lli table */
2279 sep_build_lli_table(sep
, &lli_in_array
[current_in_entry
],
2282 &num_entries_in_table
,
2285 /* Construct output lli table */
2286 sep_build_lli_table(sep
, &lli_out_array
[current_out_entry
],
2289 &num_entries_out_table
,
2292 /* If info entry is null - this is the first table built */
2293 if (info_in_entry_ptr
== NULL
) {
2294 /* Set the output parameters to physical addresses */
2296 sep_shared_area_virt_to_bus(sep
, dma_in_lli_table_ptr
);
2298 *in_num_entries_ptr
= num_entries_in_table
;
2300 *lli_table_out_ptr
=
2301 sep_shared_area_virt_to_bus(sep
,
2302 dma_out_lli_table_ptr
);
2304 *out_num_entries_ptr
= num_entries_out_table
;
2305 *table_data_size_ptr
= table_data_size
;
2307 dev_dbg(&sep
->pdev
->dev
,
2308 "[PID%d] output lli_table_in_ptr is %08lx\n",
2310 (unsigned long)*lli_table_in_ptr
);
2311 dev_dbg(&sep
->pdev
->dev
,
2312 "[PID%d] output lli_table_out_ptr is %08lx\n",
2314 (unsigned long)*lli_table_out_ptr
);
2316 /* Update the info entry of the previous in table */
2317 info_in_entry_ptr
->bus_address
=
2318 sep_shared_area_virt_to_bus(sep
,
2319 dma_in_lli_table_ptr
);
2321 info_in_entry_ptr
->block_size
=
2322 ((num_entries_in_table
) << 24) |
2325 /* Update the info entry of the previous in table */
2326 info_out_entry_ptr
->bus_address
=
2327 sep_shared_area_virt_to_bus(sep
,
2328 dma_out_lli_table_ptr
);
2330 info_out_entry_ptr
->block_size
=
2331 ((num_entries_out_table
) << 24) |
2334 dev_dbg(&sep
->pdev
->dev
,
2335 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2337 (unsigned long)info_in_entry_ptr
->bus_address
,
2338 info_in_entry_ptr
->block_size
);
2340 dev_dbg(&sep
->pdev
->dev
,
2341 "[PID%d] output lli_table_out_ptr:"
2344 (unsigned long)info_out_entry_ptr
->bus_address
,
2345 info_out_entry_ptr
->block_size
);
2348 /* Save the pointer to the info entry of the current tables */
2349 info_in_entry_ptr
= in_lli_table_ptr
+
2350 num_entries_in_table
- 1;
2351 info_out_entry_ptr
= out_lli_table_ptr
+
2352 num_entries_out_table
- 1;
2354 dev_dbg(&sep
->pdev
->dev
,
2355 "[PID%d] output num_entries_out_table is %x\n",
2357 (u32
)num_entries_out_table
);
2358 dev_dbg(&sep
->pdev
->dev
,
2359 "[PID%d] output info_in_entry_ptr is %lx\n",
2361 (unsigned long)info_in_entry_ptr
);
2362 dev_dbg(&sep
->pdev
->dev
,
2363 "[PID%d] output info_out_entry_ptr is %lx\n",
2365 (unsigned long)info_out_entry_ptr
);
2368 /* Print input tables */
2369 if (!dmatables_region
) {
2370 sep_debug_print_lli_tables(
2372 (struct sep_lli_entry
*)
2373 sep_shared_area_bus_to_virt(sep
, *lli_table_in_ptr
),
2374 *in_num_entries_ptr
,
2375 *table_data_size_ptr
);
2378 /* Print output tables */
2379 if (!dmatables_region
) {
2380 sep_debug_print_lli_tables(
2382 (struct sep_lli_entry
*)
2383 sep_shared_area_bus_to_virt(sep
, *lli_table_out_ptr
),
2384 *out_num_entries_ptr
,
2385 *table_data_size_ptr
);
2392 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2393 * @app_virt_in_addr:
2394 * @app_virt_out_addr:
2397 * @lli_table_in_ptr:
2398 * @lli_table_out_ptr:
2399 * @in_num_entries_ptr:
2400 * @out_num_entries_ptr:
2401 * @table_data_size_ptr:
2402 * @is_kva: set for kernel data; used only for kernel crypto module
2404 * This function builds input and output DMA tables for synchronous
2405 * symmetric operations (AES, DES, HASH). It also checks that each table
2406 * is of the modular block size
2407 * Note that all bus addresses that are passed to the SEP
2408 * are in 32 bit format; the SEP is a 32 bit device
2410 static int sep_prepare_input_output_dma_table(struct sep_device
*sep
,
2411 unsigned long app_virt_in_addr
,
2412 unsigned long app_virt_out_addr
,
2415 dma_addr_t
*lli_table_in_ptr
,
2416 dma_addr_t
*lli_table_out_ptr
,
2417 u32
*in_num_entries_ptr
,
2418 u32
*out_num_entries_ptr
,
2419 u32
*table_data_size_ptr
,
2421 void **dmatables_region
,
2422 struct sep_dma_context
*dma_ctx
)
2426 /* Array of pointers of page */
2427 struct sep_lli_entry
*lli_in_array
;
2428 /* Array of pointers of page */
2429 struct sep_lli_entry
*lli_out_array
;
2436 if (data_size
== 0) {
2437 /* Prepare empty table for input and output */
2438 if (dmatables_region
) {
2439 error
= sep_allocate_dmatables_region(
2447 sep_prepare_empty_lli_table(sep
, lli_table_in_ptr
,
2448 in_num_entries_ptr
, table_data_size_ptr
,
2449 dmatables_region
, dma_ctx
);
2451 sep_prepare_empty_lli_table(sep
, lli_table_out_ptr
,
2452 out_num_entries_ptr
, table_data_size_ptr
,
2453 dmatables_region
, dma_ctx
);
2455 goto update_dcb_counter
;
2458 /* Initialize the pages pointers */
2459 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
= NULL
;
2460 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
= NULL
;
2462 /* Lock the pages of the buffer and translate them to pages */
2463 if (is_kva
== true) {
2464 dev_dbg(&sep
->pdev
->dev
, "[PID%d] Locking kernel input pages\n",
2466 error
= sep_lock_kernel_pages(sep
, app_virt_in_addr
,
2467 data_size
, &lli_in_array
, SEP_DRIVER_IN_FLAG
,
2470 dev_warn(&sep
->pdev
->dev
,
2471 "[PID%d] sep_lock_kernel_pages for input "
2472 "virtual buffer failed\n", current
->pid
);
2477 dev_dbg(&sep
->pdev
->dev
, "[PID%d] Locking kernel output pages\n",
2479 error
= sep_lock_kernel_pages(sep
, app_virt_out_addr
,
2480 data_size
, &lli_out_array
, SEP_DRIVER_OUT_FLAG
,
2484 dev_warn(&sep
->pdev
->dev
,
2485 "[PID%d] sep_lock_kernel_pages for output "
2486 "virtual buffer failed\n", current
->pid
);
2488 goto end_function_free_lli_in
;
2494 dev_dbg(&sep
->pdev
->dev
, "[PID%d] Locking user input pages\n",
2496 error
= sep_lock_user_pages(sep
, app_virt_in_addr
,
2497 data_size
, &lli_in_array
, SEP_DRIVER_IN_FLAG
,
2500 dev_warn(&sep
->pdev
->dev
,
2501 "[PID%d] sep_lock_user_pages for input "
2502 "virtual buffer failed\n", current
->pid
);
2507 if (dma_ctx
->secure_dma
== true) {
2508 /* secure_dma requires use of non accessible memory */
2509 dev_dbg(&sep
->pdev
->dev
, "[PID%d] in secure_dma\n",
2511 error
= sep_lli_table_secure_dma(sep
,
2512 app_virt_out_addr
, data_size
, &lli_out_array
,
2513 SEP_DRIVER_OUT_FLAG
, dma_ctx
);
2515 dev_warn(&sep
->pdev
->dev
,
2516 "[PID%d] secure dma table setup "
2517 " for output virtual buffer failed\n",
2520 goto end_function_free_lli_in
;
2523 /* For normal, non-secure dma */
2524 dev_dbg(&sep
->pdev
->dev
, "[PID%d] not in secure_dma\n",
2527 dev_dbg(&sep
->pdev
->dev
,
2528 "[PID%d] Locking user output pages\n",
2531 error
= sep_lock_user_pages(sep
, app_virt_out_addr
,
2532 data_size
, &lli_out_array
, SEP_DRIVER_OUT_FLAG
,
2536 dev_warn(&sep
->pdev
->dev
,
2537 "[PID%d] sep_lock_user_pages"
2538 " for output virtual buffer failed\n",
2541 goto end_function_free_lli_in
;
2546 dev_dbg(&sep
->pdev
->dev
,
2547 "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2549 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_num_pages
);
2551 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_out_num_pages is (hex) %x\n",
2553 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_num_pages
);
2555 dev_dbg(&sep
->pdev
->dev
,
2556 "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2557 current
->pid
, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
);
2559 /* Call the function that creates table from the lli arrays */
2560 dev_dbg(&sep
->pdev
->dev
, "[PID%d] calling create table from lli\n",
2562 error
= sep_construct_dma_tables_from_lli(
2564 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].
2567 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].
2569 block_size
, lli_table_in_ptr
, lli_table_out_ptr
,
2570 in_num_entries_ptr
, out_num_entries_ptr
,
2571 table_data_size_ptr
, dmatables_region
, dma_ctx
);
2574 dev_warn(&sep
->pdev
->dev
,
2575 "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2577 goto end_function_with_error
;
2580 kfree(lli_out_array
);
2581 kfree(lli_in_array
);
2584 /* Update DCB counter */
2585 dma_ctx
->nr_dcb_creat
++;
2589 end_function_with_error
:
2590 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
);
2591 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_map_array
= NULL
;
2592 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
);
2593 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].out_page_array
= NULL
;
2594 kfree(lli_out_array
);
2597 end_function_free_lli_in
:
2598 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
);
2599 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_map_array
= NULL
;
2600 kfree(dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
);
2601 dma_ctx
->dma_res_arr
[dma_ctx
->nr_dcb_creat
].in_page_array
= NULL
;
2602 kfree(lli_in_array
);
2611 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2612 * @app_in_address: unsigned long; for data buffer in (user space)
2613 * @app_out_address: unsigned long; for data buffer out (user space)
2614 * @data_in_size: u32; for size of data
2615 * @block_size: u32; for block size
2616 * @tail_block_size: u32; for size of tail block
2617 * @isapplet: bool; to indicate external app
2618 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2619 * @secure_dma; indicates whether this is secure_dma using IMR
2621 * This function prepares the linked DMA tables and puts the
2622 * address for the linked list of tables into a DCB (data control
2623 * block) the address of which is known by the SEP hardware
2624 * Note that all bus addresses that are passed to the SEP
2625 * are in 32 bit format; the SEP is a 32 bit device
2627 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device
*sep
,
2628 unsigned long app_in_address
,
2629 unsigned long app_out_address
,
2632 u32 tail_block_size
,
2636 struct sep_dcblock
*dcb_region
,
2637 void **dmatables_region
,
2638 struct sep_dma_context
**dma_ctx
,
2639 struct scatterlist
*src_sg
,
2640 struct scatterlist
*dst_sg
)
2645 /* Address of the created DCB table */
2646 struct sep_dcblock
*dcb_table_ptr
= NULL
;
2647 /* The physical address of the first input DMA table */
2648 dma_addr_t in_first_mlli_address
= 0;
2649 /* Number of entries in the first input DMA table */
2650 u32 in_first_num_entries
= 0;
2651 /* The physical address of the first output DMA table */
2652 dma_addr_t out_first_mlli_address
= 0;
2653 /* Number of entries in the first output DMA table */
2654 u32 out_first_num_entries
= 0;
2655 /* Data in the first input/output table */
2656 u32 first_data_size
= 0;
2658 dev_dbg(&sep
->pdev
->dev
, "[PID%d] app_in_address %lx\n",
2659 current
->pid
, app_in_address
);
2661 dev_dbg(&sep
->pdev
->dev
, "[PID%d] app_out_address %lx\n",
2662 current
->pid
, app_out_address
);
2664 dev_dbg(&sep
->pdev
->dev
, "[PID%d] data_in_size %x\n",
2665 current
->pid
, data_in_size
);
2667 dev_dbg(&sep
->pdev
->dev
, "[PID%d] block_size %x\n",
2668 current
->pid
, block_size
);
2670 dev_dbg(&sep
->pdev
->dev
, "[PID%d] tail_block_size %x\n",
2671 current
->pid
, tail_block_size
);
2673 dev_dbg(&sep
->pdev
->dev
, "[PID%d] isapplet %x\n",
2674 current
->pid
, isapplet
);
2676 dev_dbg(&sep
->pdev
->dev
, "[PID%d] is_kva %x\n",
2677 current
->pid
, is_kva
);
2679 dev_dbg(&sep
->pdev
->dev
, "[PID%d] src_sg %p\n",
2680 current
->pid
, src_sg
);
2682 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dst_sg %p\n",
2683 current
->pid
, dst_sg
);
2686 dev_warn(&sep
->pdev
->dev
, "[PID%d] no DMA context pointer\n",
2693 /* In case there are multiple DCBs for this transaction */
2694 dev_dbg(&sep
->pdev
->dev
, "[PID%d] DMA context already set\n",
2697 *dma_ctx
= kzalloc(sizeof(**dma_ctx
), GFP_KERNEL
);
2699 dev_dbg(&sep
->pdev
->dev
,
2700 "[PID%d] Not enough memory for DMA context\n",
2705 dev_dbg(&sep
->pdev
->dev
,
2706 "[PID%d] Created DMA context addr at 0x%p\n",
2707 current
->pid
, *dma_ctx
);
2710 (*dma_ctx
)->secure_dma
= secure_dma
;
2712 /* these are for kernel crypto only */
2713 (*dma_ctx
)->src_sg
= src_sg
;
2714 (*dma_ctx
)->dst_sg
= dst_sg
;
2716 if ((*dma_ctx
)->nr_dcb_creat
== SEP_MAX_NUM_SYNC_DMA_OPS
) {
2717 /* No more DCBs to allocate */
2718 dev_dbg(&sep
->pdev
->dev
, "[PID%d] no more DCBs available\n",
2721 goto end_function_error
;
2724 /* Allocate new DCB */
2726 dcb_table_ptr
= dcb_region
;
2728 dcb_table_ptr
= (struct sep_dcblock
*)(sep
->shared_addr
+
2729 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
+
2730 ((*dma_ctx
)->nr_dcb_creat
*
2731 sizeof(struct sep_dcblock
)));
2734 /* Set the default values in the DCB */
2735 dcb_table_ptr
->input_mlli_address
= 0;
2736 dcb_table_ptr
->input_mlli_num_entries
= 0;
2737 dcb_table_ptr
->input_mlli_data_size
= 0;
2738 dcb_table_ptr
->output_mlli_address
= 0;
2739 dcb_table_ptr
->output_mlli_num_entries
= 0;
2740 dcb_table_ptr
->output_mlli_data_size
= 0;
2741 dcb_table_ptr
->tail_data_size
= 0;
2742 dcb_table_ptr
->out_vr_tail_pt
= 0;
2744 if (isapplet
== true) {
2746 /* Check if there is enough data for DMA operation */
2747 if (data_in_size
< SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
) {
2748 if (is_kva
== true) {
2750 goto end_function_error
;
2752 if (copy_from_user(dcb_table_ptr
->tail_data
,
2753 (void __user
*)app_in_address
,
2756 goto end_function_error
;
2760 dcb_table_ptr
->tail_data_size
= data_in_size
;
2762 /* Set the output user-space address for mem2mem op */
2763 if (app_out_address
)
2764 dcb_table_ptr
->out_vr_tail_pt
=
2765 (aligned_u64
)app_out_address
;
2768 * Update both data length parameters in order to avoid
2769 * second data copy and allow building of empty mlli
2776 if (!app_out_address
) {
2777 tail_size
= data_in_size
% block_size
;
2779 if (tail_block_size
== block_size
)
2780 tail_size
= block_size
;
2787 if (tail_size
> sizeof(dcb_table_ptr
->tail_data
))
2789 if (is_kva
== true) {
2791 goto end_function_error
;
2793 /* We have tail data - copy it to DCB */
2794 if (copy_from_user(dcb_table_ptr
->tail_data
,
2795 (void __user
*)(app_in_address
+
2796 data_in_size
- tail_size
), tail_size
)) {
2798 goto end_function_error
;
2801 if (app_out_address
)
2803 * Calculate the output address
2804 * according to tail data size
2806 dcb_table_ptr
->out_vr_tail_pt
=
2807 (aligned_u64
)app_out_address
+
2808 data_in_size
- tail_size
;
2810 /* Save the real tail data size */
2811 dcb_table_ptr
->tail_data_size
= tail_size
;
2813 * Update the data size without the tail
2814 * data size AKA data for the dma
2816 data_in_size
= (data_in_size
- tail_size
);
2819 /* Check if we need to build only input table or input/output */
2820 if (app_out_address
) {
2821 /* Prepare input/output tables */
2822 error
= sep_prepare_input_output_dma_table(sep
,
2827 &in_first_mlli_address
,
2828 &out_first_mlli_address
,
2829 &in_first_num_entries
,
2830 &out_first_num_entries
,
2836 /* Prepare input tables */
2837 error
= sep_prepare_input_dma_table(sep
,
2841 &in_first_mlli_address
,
2842 &in_first_num_entries
,
2850 dev_warn(&sep
->pdev
->dev
,
2851 "prepare DMA table call failed "
2852 "from prepare DCB call\n");
2853 goto end_function_error
;
2856 /* Set the DCB values */
2857 dcb_table_ptr
->input_mlli_address
= in_first_mlli_address
;
2858 dcb_table_ptr
->input_mlli_num_entries
= in_first_num_entries
;
2859 dcb_table_ptr
->input_mlli_data_size
= first_data_size
;
2860 dcb_table_ptr
->output_mlli_address
= out_first_mlli_address
;
2861 dcb_table_ptr
->output_mlli_num_entries
= out_first_num_entries
;
2862 dcb_table_ptr
->output_mlli_data_size
= first_data_size
;
2877 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2878 * @sep: pointer to struct sep_device
2879 * @isapplet: indicates external application (used for kernel access)
2880 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2882 * This function frees the DMA tables and DCB
2884 static int sep_free_dma_tables_and_dcb(struct sep_device
*sep
, bool isapplet
,
2885 bool is_kva
, struct sep_dma_context
**dma_ctx
)
2887 struct sep_dcblock
*dcb_table_ptr
;
2888 unsigned long pt_hold
;
2895 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_free_dma_tables_and_dcb\n",
2898 if (((*dma_ctx
)->secure_dma
== false) && (isapplet
== true)) {
2899 dev_dbg(&sep
->pdev
->dev
, "[PID%d] handling applet\n",
2902 /* Tail stuff is only for non secure_dma */
2903 /* Set pointer to first DCB table */
2904 dcb_table_ptr
= (struct sep_dcblock
*)
2906 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
);
2909 * Go over each DCB and see if
2910 * tail pointer must be updated
2912 for (i
= 0; dma_ctx
&& *dma_ctx
&&
2913 i
< (*dma_ctx
)->nr_dcb_creat
; i
++, dcb_table_ptr
++) {
2914 if (dcb_table_ptr
->out_vr_tail_pt
) {
2915 pt_hold
= (unsigned long)dcb_table_ptr
->
2917 tail_pt
= (void *)pt_hold
;
2918 if (is_kva
== true) {
2922 error_temp
= copy_to_user(
2923 (void __user
*)tail_pt
,
2924 dcb_table_ptr
->tail_data
,
2925 dcb_table_ptr
->tail_data_size
);
2928 /* Release the DMA resource */
2936 /* Free the output pages, if any */
2937 sep_free_dma_table_data_handler(sep
, dma_ctx
);
2939 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2946 * sep_prepare_dcb_handler - prepare a control block
2947 * @sep: pointer to struct sep_device
2948 * @arg: pointer to user parameters
2949 * @secure_dma: indicate whether we are using secure_dma on IMR
2951 * This function will retrieve the RAR buffer physical addresses, type
2952 * & size corresponding to the RAR handles provided in the buffers vector.
2954 static int sep_prepare_dcb_handler(struct sep_device
*sep
, unsigned long arg
,
2956 struct sep_dma_context
**dma_ctx
)
2959 /* Command arguments */
2960 static struct build_dcb_struct command_args
;
2962 /* Get the command arguments */
2963 if (copy_from_user(&command_args
, (void __user
*)arg
,
2964 sizeof(struct build_dcb_struct
))) {
2969 dev_dbg(&sep
->pdev
->dev
,
2970 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2971 current
->pid
, command_args
.app_in_address
);
2972 dev_dbg(&sep
->pdev
->dev
,
2973 "[PID%d] app_out_address is %08llx\n",
2974 current
->pid
, command_args
.app_out_address
);
2975 dev_dbg(&sep
->pdev
->dev
,
2976 "[PID%d] data_size is %x\n",
2977 current
->pid
, command_args
.data_in_size
);
2978 dev_dbg(&sep
->pdev
->dev
,
2979 "[PID%d] block_size is %x\n",
2980 current
->pid
, command_args
.block_size
);
2981 dev_dbg(&sep
->pdev
->dev
,
2982 "[PID%d] tail block_size is %x\n",
2983 current
->pid
, command_args
.tail_block_size
);
2984 dev_dbg(&sep
->pdev
->dev
,
2985 "[PID%d] is_applet is %x\n",
2986 current
->pid
, command_args
.is_applet
);
2988 if (!command_args
.app_in_address
) {
2989 dev_warn(&sep
->pdev
->dev
,
2990 "[PID%d] null app_in_address\n", current
->pid
);
2995 error
= sep_prepare_input_output_dma_table_in_dcb(sep
,
2996 (unsigned long)command_args
.app_in_address
,
2997 (unsigned long)command_args
.app_out_address
,
2998 command_args
.data_in_size
, command_args
.block_size
,
2999 command_args
.tail_block_size
,
3000 command_args
.is_applet
, false,
3001 secure_dma
, NULL
, NULL
, dma_ctx
, NULL
, NULL
);
3009 * sep_free_dcb_handler - free control block resources
3010 * @sep: pointer to struct sep_device
3012 * This function frees the DCB resources and updates the needed
3013 * user-space buffers.
3015 static int sep_free_dcb_handler(struct sep_device
*sep
,
3016 struct sep_dma_context
**dma_ctx
)
3018 if (!dma_ctx
|| !(*dma_ctx
)) {
3019 dev_dbg(&sep
->pdev
->dev
,
3020 "[PID%d] no dma context defined, nothing to free\n",
3025 dev_dbg(&sep
->pdev
->dev
, "[PID%d] free dcbs num of DCBs %x\n",
3027 (*dma_ctx
)->nr_dcb_creat
);
3029 return sep_free_dma_tables_and_dcb(sep
, false, false, dma_ctx
);
3033 * sep_ioctl - ioctl handler for sep device
3034 * @filp: pointer to struct file
3036 * @arg: pointer to argument structure
3038 * Implement the ioctl methods available on the SEP device.
3040 static long sep_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
3042 struct sep_private_data
* const private_data
= filp
->private_data
;
3043 struct sep_call_status
*call_status
= &private_data
->call_status
;
3044 struct sep_device
*sep
= private_data
->device
;
3045 struct sep_dma_context
**dma_ctx
= &private_data
->dma_ctx
;
3046 struct sep_queue_info
**my_queue_elem
= &private_data
->my_queue_elem
;
3049 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ioctl cmd 0x%x\n",
3051 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dma context addr 0x%p\n",
3052 current
->pid
, *dma_ctx
);
3054 /* Make sure we own this device */
3055 error
= sep_check_transaction_owner(sep
);
3057 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ioctl pid is not owner\n",
3062 /* Check that sep_mmap has been called before */
3063 if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET
,
3064 &call_status
->status
)) {
3065 dev_dbg(&sep
->pdev
->dev
,
3066 "[PID%d] mmap not called\n", current
->pid
);
3071 /* Check that the command is for SEP device */
3072 if (_IOC_TYPE(cmd
) != SEP_IOC_MAGIC_NUMBER
) {
3078 case SEP_IOCSENDSEPCOMMAND
:
3079 dev_dbg(&sep
->pdev
->dev
,
3080 "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3082 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
3083 &call_status
->status
)) {
3084 dev_warn(&sep
->pdev
->dev
,
3085 "[PID%d] send msg already done\n",
3090 /* Send command to SEP */
3091 error
= sep_send_command_handler(sep
);
3093 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
3094 &call_status
->status
);
3095 dev_dbg(&sep
->pdev
->dev
,
3096 "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3099 case SEP_IOCENDTRANSACTION
:
3100 dev_dbg(&sep
->pdev
->dev
,
3101 "[PID%d] SEP_IOCENDTRANSACTION start\n",
3103 error
= sep_end_transaction_handler(sep
, dma_ctx
, call_status
,
3105 dev_dbg(&sep
->pdev
->dev
,
3106 "[PID%d] SEP_IOCENDTRANSACTION end\n",
3109 case SEP_IOCPREPAREDCB
:
3110 dev_dbg(&sep
->pdev
->dev
,
3111 "[PID%d] SEP_IOCPREPAREDCB start\n",
3113 case SEP_IOCPREPAREDCB_SECURE_DMA
:
3114 dev_dbg(&sep
->pdev
->dev
,
3115 "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3117 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET
,
3118 &call_status
->status
)) {
3119 dev_dbg(&sep
->pdev
->dev
,
3120 "[PID%d] dcb prep needed before send msg\n",
3127 dev_dbg(&sep
->pdev
->dev
,
3128 "[PID%d] dcb null arg\n", current
->pid
);
3133 if (cmd
== SEP_IOCPREPAREDCB
) {
3135 dev_dbg(&sep
->pdev
->dev
,
3136 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3139 error
= sep_prepare_dcb_handler(sep
, arg
, false,
3143 dev_dbg(&sep
->pdev
->dev
,
3144 "[PID%d] SEP_IOC_POC (with secure_dma)\n",
3147 error
= sep_prepare_dcb_handler(sep
, arg
, true,
3150 dev_dbg(&sep
->pdev
->dev
, "[PID%d] dcb's end\n",
3153 case SEP_IOCFREEDCB
:
3154 dev_dbg(&sep
->pdev
->dev
, "[PID%d] SEP_IOCFREEDCB start\n",
3156 case SEP_IOCFREEDCB_SECURE_DMA
:
3157 dev_dbg(&sep
->pdev
->dev
,
3158 "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3160 error
= sep_free_dcb_handler(sep
, dma_ctx
);
3161 dev_dbg(&sep
->pdev
->dev
, "[PID%d] SEP_IOCFREEDCB end\n",
3166 dev_dbg(&sep
->pdev
->dev
, "[PID%d] default end\n",
3172 dev_dbg(&sep
->pdev
->dev
, "[PID%d] ioctl end\n", current
->pid
);
3178 * sep_inthandler - interrupt handler for sep device
3180 * @dev_id: device id
3182 static irqreturn_t
sep_inthandler(int irq
, void *dev_id
)
3184 unsigned long lock_irq_flag
;
3185 u32 reg_val
, reg_val2
= 0;
3186 struct sep_device
*sep
= dev_id
;
3187 irqreturn_t int_error
= IRQ_HANDLED
;
3189 /* Are we in power save? */
3190 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3191 if (sep
->pdev
->dev
.power
.runtime_status
!= RPM_ACTIVE
) {
3192 dev_dbg(&sep
->pdev
->dev
, "interrupt during pwr save\n");
3197 if (test_bit(SEP_WORKING_LOCK_BIT
, &sep
->in_use_flags
) == 0) {
3198 dev_dbg(&sep
->pdev
->dev
, "interrupt while nobody using sep\n");
3202 /* Read the IRR register to check if this is SEP interrupt */
3203 reg_val
= sep_read_reg(sep
, HW_HOST_IRR_REG_ADDR
);
3205 dev_dbg(&sep
->pdev
->dev
, "sep int: IRR REG val: %x\n", reg_val
);
3207 if (reg_val
& (0x1 << 13)) {
3209 /* Lock and update the counter of reply messages */
3210 spin_lock_irqsave(&sep
->snd_rply_lck
, lock_irq_flag
);
3212 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lock_irq_flag
);
3214 dev_dbg(&sep
->pdev
->dev
, "sep int: send_ct %lx reply_ct %lx\n",
3215 sep
->send_ct
, sep
->reply_ct
);
3217 /* Is this a kernel client request */
3218 if (sep
->in_kernel
) {
3219 tasklet_schedule(&sep
->finish_tasklet
);
3220 goto finished_interrupt
;
3223 /* Is this printf or daemon request? */
3224 reg_val2
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
3225 dev_dbg(&sep
->pdev
->dev
,
3226 "SEP Interrupt - GPR2 is %08x\n", reg_val2
);
3228 clear_bit(SEP_WORKING_LOCK_BIT
, &sep
->in_use_flags
);
3230 if ((reg_val2
>> 30) & 0x1) {
3231 dev_dbg(&sep
->pdev
->dev
, "int: printf request\n");
3232 } else if (reg_val2
>> 31) {
3233 dev_dbg(&sep
->pdev
->dev
, "int: daemon request\n");
3235 dev_dbg(&sep
->pdev
->dev
, "int: SEP reply\n");
3236 wake_up(&sep
->event_interrupt
);
3239 dev_dbg(&sep
->pdev
->dev
, "int: not SEP interrupt\n");
3240 int_error
= IRQ_NONE
;
3245 if (int_error
== IRQ_HANDLED
)
3246 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, reg_val
);
3252 * sep_reconfig_shared_area - reconfigure shared area
3253 * @sep: pointer to struct sep_device
3255 * Reconfig the shared area between HOST and SEP - needed in case
3256 * the DX_CC_Init function was called before OS loading.
3258 static int sep_reconfig_shared_area(struct sep_device
*sep
)
3262 /* use to limit waiting for SEP */
3263 unsigned long end_time
;
3265 /* Send the new SHARED MESSAGE AREA to the SEP */
3266 dev_dbg(&sep
->pdev
->dev
, "reconfig shared; sending %08llx to sep\n",
3267 (unsigned long long)sep
->shared_bus
);
3269 sep_write_reg(sep
, HW_HOST_HOST_SEP_GPR1_REG_ADDR
, sep
->shared_bus
);
3271 /* Poll for SEP response */
3272 ret_val
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR1_REG_ADDR
);
3274 end_time
= jiffies
+ (WAIT_TIME
* HZ
);
3276 while ((time_before(jiffies
, end_time
)) && (ret_val
!= 0xffffffff) &&
3277 (ret_val
!= sep
->shared_bus
))
3278 ret_val
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR1_REG_ADDR
);
3280 /* Check the return value (register) */
3281 if (ret_val
!= sep
->shared_bus
) {
3282 dev_warn(&sep
->pdev
->dev
, "could not reconfig shared area\n");
3283 dev_warn(&sep
->pdev
->dev
, "result was %x\n", ret_val
);
3288 dev_dbg(&sep
->pdev
->dev
, "reconfig shared area end\n");
3294 * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables
3297 * @dcb_region: DCB region copy
3298 * @dmatables_region: MLLI/DMA tables copy
3299 * @dma_ctx: DMA context for current transaction
3301 ssize_t
sep_activate_dcb_dmatables_context(struct sep_device
*sep
,
3302 struct sep_dcblock
**dcb_region
,
3303 void **dmatables_region
,
3304 struct sep_dma_context
*dma_ctx
)
3306 void *dmaregion_free_start
= NULL
;
3307 void *dmaregion_free_end
= NULL
;
3308 void *dcbregion_free_start
= NULL
;
3309 void *dcbregion_free_end
= NULL
;
3312 dev_dbg(&sep
->pdev
->dev
, "[PID%d] activating dcb/dma region\n",
3315 if (1 > dma_ctx
->nr_dcb_creat
) {
3316 dev_warn(&sep
->pdev
->dev
,
3317 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3318 current
->pid
, dma_ctx
->nr_dcb_creat
);
3323 dmaregion_free_start
= sep
->shared_addr
3324 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
;
3325 dmaregion_free_end
= dmaregion_free_start
3326 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
- 1;
3328 if (dmaregion_free_start
3329 + dma_ctx
->dmatables_len
> dmaregion_free_end
) {
3333 memcpy(dmaregion_free_start
,
3335 dma_ctx
->dmatables_len
);
3336 /* Free MLLI table copy */
3337 kfree(*dmatables_region
);
3338 *dmatables_region
= NULL
;
3340 /* Copy thread's DCB table copy to DCB table region */
3341 dcbregion_free_start
= sep
->shared_addr
+
3342 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
;
3343 dcbregion_free_end
= dcbregion_free_start
+
3344 (SEP_MAX_NUM_SYNC_DMA_OPS
*
3345 sizeof(struct sep_dcblock
)) - 1;
3347 if (dcbregion_free_start
3348 + (dma_ctx
->nr_dcb_creat
* sizeof(struct sep_dcblock
))
3349 > dcbregion_free_end
) {
3354 memcpy(dcbregion_free_start
,
3356 dma_ctx
->nr_dcb_creat
* sizeof(struct sep_dcblock
));
3358 /* Print the tables */
3359 dev_dbg(&sep
->pdev
->dev
, "activate: input table\n");
3360 sep_debug_print_lli_tables(sep
,
3361 (struct sep_lli_entry
*)sep_shared_area_bus_to_virt(sep
,
3362 (*dcb_region
)->input_mlli_address
),
3363 (*dcb_region
)->input_mlli_num_entries
,
3364 (*dcb_region
)->input_mlli_data_size
);
3366 dev_dbg(&sep
->pdev
->dev
, "activate: output table\n");
3367 sep_debug_print_lli_tables(sep
,
3368 (struct sep_lli_entry
*)sep_shared_area_bus_to_virt(sep
,
3369 (*dcb_region
)->output_mlli_address
),
3370 (*dcb_region
)->output_mlli_num_entries
,
3371 (*dcb_region
)->output_mlli_data_size
);
3373 dev_dbg(&sep
->pdev
->dev
,
3374 "[PID%d] printing activated tables\n", current
->pid
);
3377 kfree(*dmatables_region
);
3378 *dmatables_region
= NULL
;
3387 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3389 * @dcb_region: DCB region buf to create for current transaction
3390 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3391 * @dma_ctx: DMA context buf to create for current transaction
3392 * @user_dcb_args: User arguments for DCB/MLLI creation
3393 * @num_dcbs: Number of DCBs to create
3394 * @secure_dma: Indicate use of IMR restricted memory secure dma
3396 static ssize_t
sep_create_dcb_dmatables_context(struct sep_device
*sep
,
3397 struct sep_dcblock
**dcb_region
,
3398 void **dmatables_region
,
3399 struct sep_dma_context
**dma_ctx
,
3400 const struct build_dcb_struct __user
*user_dcb_args
,
3401 const u32 num_dcbs
, bool secure_dma
)
3405 struct build_dcb_struct
*dcb_args
= NULL
;
3407 dev_dbg(&sep
->pdev
->dev
, "[PID%d] creating dcb/dma region\n",
3410 if (!dcb_region
|| !dma_ctx
|| !dmatables_region
|| !user_dcb_args
) {
3415 if (SEP_MAX_NUM_SYNC_DMA_OPS
< num_dcbs
) {
3416 dev_warn(&sep
->pdev
->dev
,
3417 "[PID%d] invalid number of dcbs 0x%08X\n",
3418 current
->pid
, num_dcbs
);
3423 dcb_args
= kzalloc(num_dcbs
* sizeof(struct build_dcb_struct
),
3426 dev_warn(&sep
->pdev
->dev
, "[PID%d] no memory for dcb args\n",
3432 if (copy_from_user(dcb_args
,
3434 num_dcbs
* sizeof(struct build_dcb_struct
))) {
3439 /* Allocate thread-specific memory for DCB */
3440 *dcb_region
= kzalloc(num_dcbs
* sizeof(struct sep_dcblock
),
3442 if (!(*dcb_region
)) {
3447 /* Prepare DCB and MLLI table into the allocated regions */
3448 for (i
= 0; i
< num_dcbs
; i
++) {
3449 error
= sep_prepare_input_output_dma_table_in_dcb(sep
,
3450 (unsigned long)dcb_args
[i
].app_in_address
,
3451 (unsigned long)dcb_args
[i
].app_out_address
,
3452 dcb_args
[i
].data_in_size
,
3453 dcb_args
[i
].block_size
,
3454 dcb_args
[i
].tail_block_size
,
3455 dcb_args
[i
].is_applet
,
3457 *dcb_region
, dmatables_region
,
3462 dev_warn(&sep
->pdev
->dev
,
3463 "[PID%d] dma table creation failed\n",
3468 if (dcb_args
[i
].app_in_address
!= 0)
3469 (*dma_ctx
)->input_data_len
+= dcb_args
[i
].data_in_size
;
3479 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3482 * @dcb_region: DCB region buf to create for current transaction
3483 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3484 * @dma_ctx: DMA context buf to create for current transaction
3485 * @user_dcb_args: User arguments for DCB/MLLI creation
3486 * @num_dcbs: Number of DCBs to create
3487 * This does that same thing as sep_create_dcb_dmatables_context
3488 * except that it is used only for the kernel crypto operation. It is
3489 * separate because there is no user data involved; the dcb data structure
3490 * is specific for kernel crypto (build_dcb_struct_kernel)
3492 int sep_create_dcb_dmatables_context_kernel(struct sep_device
*sep
,
3493 struct sep_dcblock
**dcb_region
,
3494 void **dmatables_region
,
3495 struct sep_dma_context
**dma_ctx
,
3496 const struct build_dcb_struct_kernel
*dcb_data
,
3502 dev_dbg(&sep
->pdev
->dev
, "[PID%d] creating dcb/dma region\n",
3505 if (!dcb_region
|| !dma_ctx
|| !dmatables_region
|| !dcb_data
) {
3510 if (SEP_MAX_NUM_SYNC_DMA_OPS
< num_dcbs
) {
3511 dev_warn(&sep
->pdev
->dev
,
3512 "[PID%d] invalid number of dcbs 0x%08X\n",
3513 current
->pid
, num_dcbs
);
3518 dev_dbg(&sep
->pdev
->dev
, "[PID%d] num_dcbs is %d\n",
3519 current
->pid
, num_dcbs
);
3521 /* Allocate thread-specific memory for DCB */
3522 *dcb_region
= kzalloc(num_dcbs
* sizeof(struct sep_dcblock
),
3524 if (!(*dcb_region
)) {
3529 /* Prepare DCB and MLLI table into the allocated regions */
3530 for (i
= 0; i
< num_dcbs
; i
++) {
3531 error
= sep_prepare_input_output_dma_table_in_dcb(sep
,
3532 (unsigned long)dcb_data
->app_in_address
,
3533 (unsigned long)dcb_data
->app_out_address
,
3534 dcb_data
->data_in_size
,
3535 dcb_data
->block_size
,
3536 dcb_data
->tail_block_size
,
3537 dcb_data
->is_applet
,
3540 *dcb_region
, dmatables_region
,
3545 dev_warn(&sep
->pdev
->dev
,
3546 "[PID%d] dma table creation failed\n",
3558 * sep_activate_msgarea_context - Takes the message area context into use
3560 * @msg_region: Message area context buf
3561 * @msg_len: Message area context buffer size
3563 static ssize_t
sep_activate_msgarea_context(struct sep_device
*sep
,
3565 const size_t msg_len
)
3567 dev_dbg(&sep
->pdev
->dev
, "[PID%d] activating msg region\n",
3570 if (!msg_region
|| !(*msg_region
) ||
3571 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
< msg_len
) {
3572 dev_warn(&sep
->pdev
->dev
,
3573 "[PID%d] invalid act msgarea len 0x%08zX\n",
3574 current
->pid
, msg_len
);
3578 memcpy(sep
->shared_addr
, *msg_region
, msg_len
);
3584 * sep_create_msgarea_context - Creates message area context
3586 * @msg_region: Msg area region buf to create for current transaction
3587 * @msg_user: Content for msg area region from user
3588 * @msg_len: Message area size
3590 static ssize_t
sep_create_msgarea_context(struct sep_device
*sep
,
3592 const void __user
*msg_user
,
3593 const size_t msg_len
)
3597 dev_dbg(&sep
->pdev
->dev
, "[PID%d] creating msg region\n",
3602 SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES
< msg_len
||
3603 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES
> msg_len
) {
3604 dev_warn(&sep
->pdev
->dev
,
3605 "[PID%d] invalid creat msgarea len 0x%08zX\n",
3606 current
->pid
, msg_len
);
3611 /* Allocate thread-specific memory for message buffer */
3612 *msg_region
= kzalloc(msg_len
, GFP_KERNEL
);
3613 if (!(*msg_region
)) {
3614 dev_warn(&sep
->pdev
->dev
,
3615 "[PID%d] no mem for msgarea context\n",
3621 /* Copy input data to write() to allocated message buffer */
3622 if (copy_from_user(*msg_region
, msg_user
, msg_len
)) {
3628 if (error
&& msg_region
) {
3638 * sep_read - Returns results of an operation for fastcall interface
3639 * @filp: File pointer
3640 * @buf_user: User buffer for storing results
3641 * @count_user: User buffer size
3642 * @offset: File offset, not supported
3644 * The implementation does not support reading in chunks, all data must be
3645 * consumed during a single read system call.
3647 static ssize_t
sep_read(struct file
*filp
,
3648 char __user
*buf_user
, size_t count_user
,
3651 struct sep_private_data
* const private_data
= filp
->private_data
;
3652 struct sep_call_status
*call_status
= &private_data
->call_status
;
3653 struct sep_device
*sep
= private_data
->device
;
3654 struct sep_dma_context
**dma_ctx
= &private_data
->dma_ctx
;
3655 struct sep_queue_info
**my_queue_elem
= &private_data
->my_queue_elem
;
3656 ssize_t error
= 0, error_tmp
= 0;
3658 /* Am I the process that owns the transaction? */
3659 error
= sep_check_transaction_owner(sep
);
3661 dev_dbg(&sep
->pdev
->dev
, "[PID%d] read pid is not owner\n",
3666 /* Checks that user has called necessary apis */
3667 if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET
,
3668 &call_status
->status
)) {
3669 dev_warn(&sep
->pdev
->dev
,
3670 "[PID%d] fastcall write not called\n",
3673 goto end_function_error
;
3677 dev_warn(&sep
->pdev
->dev
,
3678 "[PID%d] null user buffer\n",
3681 goto end_function_error
;
3685 /* Wait for SEP to finish */
3686 wait_event(sep
->event_interrupt
,
3687 test_bit(SEP_WORKING_LOCK_BIT
,
3688 &sep
->in_use_flags
) == 0);
3690 sep_dump_message(sep
);
3692 dev_dbg(&sep
->pdev
->dev
, "[PID%d] count_user = 0x%08zX\n",
3693 current
->pid
, count_user
);
3695 /* In case user has allocated bigger buffer */
3696 if (count_user
> SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
)
3697 count_user
= SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
;
3699 if (copy_to_user(buf_user
, sep
->shared_addr
, count_user
)) {
3701 goto end_function_error
;
3704 dev_dbg(&sep
->pdev
->dev
, "[PID%d] read succeeded\n", current
->pid
);
3708 /* Copy possible tail data to user and free DCB and MLLIs */
3709 error_tmp
= sep_free_dcb_handler(sep
, dma_ctx
);
3711 dev_warn(&sep
->pdev
->dev
, "[PID%d] dcb free failed\n",
3714 /* End the transaction, wakeup pending ones */
3715 error_tmp
= sep_end_transaction_handler(sep
, dma_ctx
, call_status
,
3718 dev_warn(&sep
->pdev
->dev
,
3719 "[PID%d] ending transaction failed\n",
3727 * sep_fastcall_args_get - Gets fastcall params from user
3729 * @args: Parameters buffer
3730 * @buf_user: User buffer for operation parameters
3731 * @count_user: User buffer size
3733 static inline ssize_t
sep_fastcall_args_get(struct sep_device
*sep
,
3734 struct sep_fastcall_hdr
*args
,
3735 const char __user
*buf_user
,
3736 const size_t count_user
)
3739 size_t actual_count
= 0;
3742 dev_warn(&sep
->pdev
->dev
,
3743 "[PID%d] null user buffer\n",
3749 if (count_user
< sizeof(struct sep_fastcall_hdr
)) {
3750 dev_warn(&sep
->pdev
->dev
,
3751 "[PID%d] too small message size 0x%08zX\n",
3752 current
->pid
, count_user
);
3758 if (copy_from_user(args
, buf_user
, sizeof(struct sep_fastcall_hdr
))) {
3763 if (SEP_FC_MAGIC
!= args
->magic
) {
3764 dev_warn(&sep
->pdev
->dev
,
3765 "[PID%d] invalid fastcall magic 0x%08X\n",
3766 current
->pid
, args
->magic
);
3771 dev_dbg(&sep
->pdev
->dev
, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3772 current
->pid
, args
->num_dcbs
);
3773 dev_dbg(&sep
->pdev
->dev
, "[PID%d] fastcall hdr msg len 0x%08X\n",
3774 current
->pid
, args
->msg_len
);
3776 if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES
< args
->msg_len
||
3777 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES
> args
->msg_len
) {
3778 dev_warn(&sep
->pdev
->dev
,
3779 "[PID%d] invalid message length\n",
3785 actual_count
= sizeof(struct sep_fastcall_hdr
)
3787 + (args
->num_dcbs
* sizeof(struct build_dcb_struct
));
3789 if (actual_count
!= count_user
) {
3790 dev_warn(&sep
->pdev
->dev
,
3791 "[PID%d] inconsistent message "
3792 "sizes 0x%08zX vs 0x%08zX\n",
3793 current
->pid
, actual_count
, count_user
);
3803 * sep_write - Starts an operation for fastcall interface
3804 * @filp: File pointer
3805 * @buf_user: User buffer for operation parameters
3806 * @count_user: User buffer size
3807 * @offset: File offset, not supported
3809 * The implementation does not support writing in chunks,
3810 * all data must be given during a single write system call.
3812 static ssize_t
sep_write(struct file
*filp
,
3813 const char __user
*buf_user
, size_t count_user
,
3816 struct sep_private_data
* const private_data
= filp
->private_data
;
3817 struct sep_call_status
*call_status
= &private_data
->call_status
;
3818 struct sep_device
*sep
= private_data
->device
;
3819 struct sep_dma_context
*dma_ctx
= NULL
;
3820 struct sep_fastcall_hdr call_hdr
= {0};
3821 void *msg_region
= NULL
;
3822 void *dmatables_region
= NULL
;
3823 struct sep_dcblock
*dcb_region
= NULL
;
3825 struct sep_queue_info
*my_queue_elem
= NULL
;
3826 bool my_secure_dma
; /* are we using secure_dma (IMR)? */
3828 dev_dbg(&sep
->pdev
->dev
, "[PID%d] sep dev is 0x%p\n",
3830 dev_dbg(&sep
->pdev
->dev
, "[PID%d] private_data is 0x%p\n",
3831 current
->pid
, private_data
);
3833 error
= sep_fastcall_args_get(sep
, &call_hdr
, buf_user
, count_user
);
3837 buf_user
+= sizeof(struct sep_fastcall_hdr
);
3839 if (call_hdr
.secure_dma
== 0)
3840 my_secure_dma
= false;
3842 my_secure_dma
= true;
3845 * Controlling driver memory usage by limiting amount of
3846 * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
3847 * of threads can progress further at a time
3849 dev_dbg(&sep
->pdev
->dev
,
3850 "[PID%d] waiting for double buffering region access\n",
3852 error
= down_interruptible(&sep
->sep_doublebuf
);
3853 dev_dbg(&sep
->pdev
->dev
, "[PID%d] double buffering region start\n",
3856 /* Signal received */
3857 goto end_function_error
;
3862 * Prepare contents of the shared area regions for
3863 * the operation into temporary buffers
3865 if (0 < call_hdr
.num_dcbs
) {
3866 error
= sep_create_dcb_dmatables_context(sep
,
3870 (const struct build_dcb_struct __user
*)
3872 call_hdr
.num_dcbs
, my_secure_dma
);
3874 goto end_function_error_doublebuf
;
3876 buf_user
+= call_hdr
.num_dcbs
* sizeof(struct build_dcb_struct
);
3879 error
= sep_create_msgarea_context(sep
,
3884 goto end_function_error_doublebuf
;
3886 dev_dbg(&sep
->pdev
->dev
, "[PID%d] updating queue status\n",
3888 my_queue_elem
= sep_queue_status_add(sep
,
3889 ((struct sep_msgarea_hdr
*)msg_region
)->opcode
,
3890 (dma_ctx
) ? dma_ctx
->input_data_len
: 0,
3892 current
->comm
, sizeof(current
->comm
));
3894 if (!my_queue_elem
) {
3895 dev_dbg(&sep
->pdev
->dev
,
3896 "[PID%d] updating queue status error\n", current
->pid
);
3898 goto end_function_error_doublebuf
;
3901 /* Wait until current process gets the transaction */
3902 error
= sep_wait_transaction(sep
);
3905 /* Interrupted by signal, don't clear transaction */
3906 dev_dbg(&sep
->pdev
->dev
, "[PID%d] interrupted by signal\n",
3908 sep_queue_status_remove(sep
, &my_queue_elem
);
3909 goto end_function_error_doublebuf
;
3912 dev_dbg(&sep
->pdev
->dev
, "[PID%d] saving queue element\n",
3914 private_data
->my_queue_elem
= my_queue_elem
;
3916 /* Activate shared area regions for the transaction */
3917 error
= sep_activate_msgarea_context(sep
, &msg_region
,
3920 goto end_function_error_clear_transact
;
3922 sep_dump_message(sep
);
3924 if (0 < call_hdr
.num_dcbs
) {
3925 error
= sep_activate_dcb_dmatables_context(sep
,
3930 goto end_function_error_clear_transact
;
3933 /* Send command to SEP */
3934 error
= sep_send_command_handler(sep
);
3936 goto end_function_error_clear_transact
;
3938 /* Store DMA context for the transaction */
3939 private_data
->dma_ctx
= dma_ctx
;
3940 /* Update call status */
3941 set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET
, &call_status
->status
);
3944 up(&sep
->sep_doublebuf
);
3945 dev_dbg(&sep
->pdev
->dev
, "[PID%d] double buffering region end\n",
3950 end_function_error_clear_transact
:
3951 sep_end_transaction_handler(sep
, &dma_ctx
, call_status
,
3952 &private_data
->my_queue_elem
);
3954 end_function_error_doublebuf
:
3955 up(&sep
->sep_doublebuf
);
3956 dev_dbg(&sep
->pdev
->dev
, "[PID%d] double buffering region end\n",
3961 sep_free_dma_table_data_handler(sep
, &dma_ctx
);
3965 kfree(dmatables_region
);
3971 * sep_seek - Handler for seek system call
3972 * @filp: File pointer
3973 * @offset: File offset
3974 * @origin: Options for offset
3976 * Fastcall interface does not support seeking, all reads
3977 * and writes are from/to offset zero
3979 static loff_t
sep_seek(struct file
*filp
, loff_t offset
, int origin
)
3987 * sep_file_operations - file operation on sep device
3988 * @sep_ioctl: ioctl handler from user space call
3989 * @sep_poll: poll handler
3990 * @sep_open: handles sep device open request
3991 * @sep_release:handles sep device release request
3992 * @sep_mmap: handles memory mapping requests
3993 * @sep_read: handles read request on sep device
3994 * @sep_write: handles write request on sep device
3995 * @sep_seek: handles seek request on sep device
3997 static const struct file_operations sep_file_operations
= {
3998 .owner
= THIS_MODULE
,
3999 .unlocked_ioctl
= sep_ioctl
,
4002 .release
= sep_release
,
4010 * sep_sysfs_read - read sysfs entry per gives arguments
4011 * @filp: file pointer
4012 * @kobj: kobject pointer
4013 * @attr: binary file attributes
4014 * @buf: read to this buffer
4015 * @pos: offset to read
4016 * @count: amount of data to read
4018 * This function is to read sysfs entries for sep driver per given arguments.
4021 sep_sysfs_read(struct file
*filp
, struct kobject
*kobj
,
4022 struct bin_attribute
*attr
,
4023 char *buf
, loff_t pos
, size_t count
)
4025 unsigned long lck_flags
;
4026 size_t nleft
= count
;
4027 struct sep_device
*sep
= sep_dev
;
4028 struct sep_queue_info
*queue_elem
= NULL
;
4032 spin_lock_irqsave(&sep
->sep_queue_lock
, lck_flags
);
4034 queue_num
= sep
->sep_queue_num
;
4035 if (queue_num
> SEP_DOUBLEBUF_USERS_LIMIT
)
4036 queue_num
= SEP_DOUBLEBUF_USERS_LIMIT
;
4039 if (count
< sizeof(queue_num
)
4040 + (queue_num
* sizeof(struct sep_queue_data
))) {
4041 spin_unlock_irqrestore(&sep
->sep_queue_lock
, lck_flags
);
4045 memcpy(buf
, &queue_num
, sizeof(queue_num
));
4046 buf
+= sizeof(queue_num
);
4047 nleft
-= sizeof(queue_num
);
4049 list_for_each_entry(queue_elem
, &sep
->sep_queue_status
, list
) {
4050 if (i
++ > queue_num
)
4053 memcpy(buf
, &queue_elem
->data
, sizeof(queue_elem
->data
));
4054 nleft
-= sizeof(queue_elem
->data
);
4055 buf
+= sizeof(queue_elem
->data
);
4057 spin_unlock_irqrestore(&sep
->sep_queue_lock
, lck_flags
);
4059 return count
- nleft
;
4063 * bin_attributes - defines attributes for queue_status
4064 * @attr: attributes (name & permissions)
4065 * @read: function pointer to read this file
4066 * @size: maxinum size of binary attribute
4068 static const struct bin_attribute queue_status
= {
4069 .attr
= {.name
= "queue_status", .mode
= 0444},
4070 .read
= sep_sysfs_read
,
4072 + (SEP_DOUBLEBUF_USERS_LIMIT
* sizeof(struct sep_queue_data
)),
4076 * sep_register_driver_with_fs - register misc devices
4077 * @sep: pointer to struct sep_device
4079 * This function registers the driver with the file system
4081 static int sep_register_driver_with_fs(struct sep_device
*sep
)
4085 sep
->miscdev_sep
.minor
= MISC_DYNAMIC_MINOR
;
4086 sep
->miscdev_sep
.name
= SEP_DEV_NAME
;
4087 sep
->miscdev_sep
.fops
= &sep_file_operations
;
4089 ret_val
= misc_register(&sep
->miscdev_sep
);
4091 dev_warn(&sep
->pdev
->dev
, "misc reg fails for SEP %x\n",
4096 ret_val
= device_create_bin_file(sep
->miscdev_sep
.this_device
,
4099 dev_warn(&sep
->pdev
->dev
, "sysfs attribute1 fails for SEP %x\n",
4109 *sep_probe - probe a matching PCI device
4111 *@ent: pci_device_id
4113 *Attempt to set up and configure a SEP device that has been
4114 *discovered by the PCI layer. Allocates all required resources.
4116 static int __devinit
sep_probe(struct pci_dev
*pdev
,
4117 const struct pci_device_id
*ent
)
4120 struct sep_device
*sep
= NULL
;
4122 if (sep_dev
!= NULL
) {
4123 dev_dbg(&pdev
->dev
, "only one SEP supported.\n");
4127 /* Enable the device */
4128 error
= pci_enable_device(pdev
);
4130 dev_warn(&pdev
->dev
, "error enabling pci device\n");
4134 /* Allocate the sep_device structure for this device */
4135 sep_dev
= kzalloc(sizeof(struct sep_device
), GFP_ATOMIC
);
4136 if (sep_dev
== NULL
) {
4137 dev_warn(&pdev
->dev
,
4138 "can't kmalloc the sep_device structure\n");
4140 goto end_function_disable_device
;
4144 * We're going to use another variable for actually
4145 * working with the device; this way, if we have
4146 * multiple devices in the future, it would be easier
4147 * to make appropriate changes
4151 sep
->pdev
= pci_dev_get(pdev
);
4153 init_waitqueue_head(&sep
->event_transactions
);
4154 init_waitqueue_head(&sep
->event_interrupt
);
4155 spin_lock_init(&sep
->snd_rply_lck
);
4156 spin_lock_init(&sep
->sep_queue_lock
);
4157 sema_init(&sep
->sep_doublebuf
, SEP_DOUBLEBUF_USERS_LIMIT
);
4159 INIT_LIST_HEAD(&sep
->sep_queue_status
);
4161 dev_dbg(&sep
->pdev
->dev
,
4162 "sep probe: PCI obtained, device being prepared\n");
4164 /* Set up our register area */
4165 sep
->reg_physical_addr
= pci_resource_start(sep
->pdev
, 0);
4166 if (!sep
->reg_physical_addr
) {
4167 dev_warn(&sep
->pdev
->dev
, "Error getting register start\n");
4169 goto end_function_free_sep_dev
;
4172 sep
->reg_physical_end
= pci_resource_end(sep
->pdev
, 0);
4173 if (!sep
->reg_physical_end
) {
4174 dev_warn(&sep
->pdev
->dev
, "Error getting register end\n");
4176 goto end_function_free_sep_dev
;
4179 sep
->reg_addr
= ioremap_nocache(sep
->reg_physical_addr
,
4180 (size_t)(sep
->reg_physical_end
- sep
->reg_physical_addr
+ 1));
4181 if (!sep
->reg_addr
) {
4182 dev_warn(&sep
->pdev
->dev
, "Error getting register virtual\n");
4184 goto end_function_free_sep_dev
;
4187 dev_dbg(&sep
->pdev
->dev
,
4188 "Register area start %llx end %llx virtual %p\n",
4189 (unsigned long long)sep
->reg_physical_addr
,
4190 (unsigned long long)sep
->reg_physical_end
,
4193 /* Allocate the shared area */
4194 sep
->shared_size
= SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
+
4195 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
+
4196 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES
+
4197 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES
+
4198 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES
;
4200 if (sep_map_and_alloc_shared_area(sep
)) {
4202 /* Allocation failed */
4203 goto end_function_error
;
4206 /* Clear ICR register */
4207 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4209 /* Set the IMR register - open only GPR 2 */
4210 sep_write_reg(sep
, HW_HOST_IMR_REG_ADDR
, (~(0x1 << 13)));
4212 /* Read send/receive counters from SEP */
4213 sep
->reply_ct
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
4214 sep
->reply_ct
&= 0x3FFFFFFF;
4215 sep
->send_ct
= sep
->reply_ct
;
4217 /* Get the interrupt line */
4218 error
= request_irq(pdev
->irq
, sep_inthandler
, IRQF_SHARED
,
4222 goto end_function_deallocate_sep_shared_area
;
4224 /* The new chip requires a shared area reconfigure */
4225 error
= sep_reconfig_shared_area(sep
);
4227 goto end_function_free_irq
;
4231 /* Finally magic up the device nodes */
4232 /* Register driver with the fs */
4233 error
= sep_register_driver_with_fs(sep
);
4236 dev_err(&sep
->pdev
->dev
, "error registering dev file\n");
4237 goto end_function_free_irq
;
4240 sep
->in_use
= 0; /* through touching the device */
4241 #ifdef SEP_ENABLE_RUNTIME_PM
4242 pm_runtime_put_noidle(&sep
->pdev
->dev
);
4243 pm_runtime_allow(&sep
->pdev
->dev
);
4244 pm_runtime_set_autosuspend_delay(&sep
->pdev
->dev
,
4246 pm_runtime_use_autosuspend(&sep
->pdev
->dev
);
4247 pm_runtime_mark_last_busy(&sep
->pdev
->dev
);
4248 sep
->power_save_setup
= 1;
4250 /* register kernel crypto driver */
4251 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4252 error
= sep_crypto_setup();
4254 dev_err(&sep
->pdev
->dev
, "crypto setup failed\n");
4255 goto end_function_free_irq
;
4260 end_function_free_irq
:
4261 free_irq(pdev
->irq
, sep
);
4263 end_function_deallocate_sep_shared_area
:
4264 /* De-allocate shared area */
4265 sep_unmap_and_free_shared_area(sep
);
4268 iounmap(sep
->reg_addr
);
4270 end_function_free_sep_dev
:
4271 pci_dev_put(sep_dev
->pdev
);
4275 end_function_disable_device
:
4276 pci_disable_device(pdev
);
4283 * sep_remove - handles removing device from pci subsystem
4284 * @pdev: pointer to pci device
4286 * This function will handle removing our sep device from pci subsystem on exit
4287 * or unloading this module. It should free up all used resources, and unmap if
4288 * any memory regions mapped.
4290 static void sep_remove(struct pci_dev
*pdev
)
4292 struct sep_device
*sep
= sep_dev
;
4294 /* Unregister from fs */
4295 misc_deregister(&sep
->miscdev_sep
);
4297 /* Unregister from kernel crypto */
4298 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4299 sep_crypto_takedown();
4302 free_irq(sep
->pdev
->irq
, sep
);
4304 /* Free the shared area */
4305 sep_unmap_and_free_shared_area(sep_dev
);
4306 iounmap(sep_dev
->reg_addr
);
4308 #ifdef SEP_ENABLE_RUNTIME_PM
4311 pm_runtime_forbid(&sep
->pdev
->dev
);
4312 pm_runtime_get_noresume(&sep
->pdev
->dev
);
4315 pci_dev_put(sep_dev
->pdev
);
4320 /* Initialize struct pci_device_id for our driver */
4321 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl
) = {
4322 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x0826)},
4323 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, 0x08e9)},
4327 /* Export our pci_device_id structure to user space */
4328 MODULE_DEVICE_TABLE(pci
, sep_pci_id_tbl
);
4330 #ifdef SEP_ENABLE_RUNTIME_PM
4333 * sep_pm_resume - rsume routine while waking up from S3 state
4334 * @dev: pointer to sep device
4336 * This function is to be used to wake up sep driver while system awakes from S3
4337 * state i.e. suspend to ram. The RAM in intact.
4338 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4340 static int sep_pci_resume(struct device
*dev
)
4342 struct sep_device
*sep
= sep_dev
;
4344 dev_dbg(&sep
->pdev
->dev
, "pci resume called\n");
4346 if (sep
->power_state
== SEP_DRIVER_POWERON
)
4349 /* Clear ICR register */
4350 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4352 /* Set the IMR register - open only GPR 2 */
4353 sep_write_reg(sep
, HW_HOST_IMR_REG_ADDR
, (~(0x1 << 13)));
4355 /* Read send/receive counters from SEP */
4356 sep
->reply_ct
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
4357 sep
->reply_ct
&= 0x3FFFFFFF;
4358 sep
->send_ct
= sep
->reply_ct
;
4360 sep
->power_state
= SEP_DRIVER_POWERON
;
4366 * sep_pm_suspend - suspend routine while going to S3 state
4367 * @dev: pointer to sep device
4369 * This function is to be used to suspend sep driver while system goes to S3
4370 * state i.e. suspend to ram. The RAM in intact and ON during this suspend.
4371 * Notes - revisit with more understanding of pm, ICR/IMR
4373 static int sep_pci_suspend(struct device
*dev
)
4375 struct sep_device
*sep
= sep_dev
;
4377 dev_dbg(&sep
->pdev
->dev
, "pci suspend called\n");
4378 if (sep
->in_use
== 1)
4381 sep
->power_state
= SEP_DRIVER_POWEROFF
;
4383 /* Clear ICR register */
4384 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4386 /* Set the IMR to block all */
4387 sep_write_reg(sep
, HW_HOST_IMR_REG_ADDR
, 0xFFFFFFFF);
4393 * sep_pm_runtime_resume - runtime resume routine
4394 * @dev: pointer to sep device
4396 * Notes - revisit with more understanding of pm, ICR/IMR & counters
4398 static int sep_pm_runtime_resume(struct device
*dev
)
4403 struct sep_device
*sep
= sep_dev
;
4405 dev_dbg(&sep
->pdev
->dev
, "pm runtime resume called\n");
4408 * Wait until the SCU boot is ready
4409 * This is done by iterating SCU_DELAY_ITERATION (10
4410 * microseconds each) up to SCU_DELAY_MAX (50) times.
4411 * This bit can be set in a random time that is less
4412 * than 500 microseconds after each power resume
4416 while ((!retval2
) && (delay_count
< SCU_DELAY_MAX
)) {
4417 retval2
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR3_REG_ADDR
);
4418 retval2
&= 0x00000008;
4420 udelay(SCU_DELAY_ITERATION
);
4426 dev_warn(&sep
->pdev
->dev
, "scu boot bit not set at resume\n");
4430 /* Clear ICR register */
4431 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4433 /* Set the IMR register - open only GPR 2 */
4434 sep_write_reg(sep
, HW_HOST_IMR_REG_ADDR
, (~(0x1 << 13)));
4436 /* Read send/receive counters from SEP */
4437 sep
->reply_ct
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
4438 sep
->reply_ct
&= 0x3FFFFFFF;
4439 sep
->send_ct
= sep
->reply_ct
;
4445 * sep_pm_runtime_suspend - runtime suspend routine
4446 * @dev: pointer to sep device
4448 * Notes - revisit with more understanding of pm
4450 static int sep_pm_runtime_suspend(struct device
*dev
)
4452 struct sep_device
*sep
= sep_dev
;
4454 dev_dbg(&sep
->pdev
->dev
, "pm runtime suspend called\n");
4456 /* Clear ICR register */
4457 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
4462 * sep_pm - power management for sep driver
4463 * @sep_pm_runtime_resume: resume- no communication with cpu & main memory
4464 * @sep_pm_runtime_suspend: suspend- no communication with cpu & main memory
4465 * @sep_pci_suspend: suspend - main memory is still ON
4466 * @sep_pci_resume: resume - main memory is still ON
4468 static const struct dev_pm_ops sep_pm
= {
4469 .runtime_resume
= sep_pm_runtime_resume
,
4470 .runtime_suspend
= sep_pm_runtime_suspend
,
4471 .resume
= sep_pci_resume
,
4472 .suspend
= sep_pci_suspend
,
4474 #endif /* SEP_ENABLE_RUNTIME_PM */
4477 * sep_pci_driver - registers this device with pci subsystem
4478 * @name: name identifier for this driver
4479 * @sep_pci_id_tbl: pointer to struct pci_device_id table
4480 * @sep_probe: pointer to probe function in PCI driver
4481 * @sep_remove: pointer to remove function in PCI driver
4483 static struct pci_driver sep_pci_driver
= {
4484 #ifdef SEP_ENABLE_RUNTIME_PM
4489 .name
= "sep_sec_driver",
4490 .id_table
= sep_pci_id_tbl
,
4492 .remove
= sep_remove
4496 * sep_init - init function
4498 * Module load time. Register the PCI device driver.
4501 static int __init
sep_init(void)
4503 return pci_register_driver(&sep_pci_driver
);
4508 * sep_exit - called to unload driver
4510 * Unregister the driver The device will perform all the cleanup required.
4512 static void __exit
sep_exit(void)
4514 pci_unregister_driver(&sep_pci_driver
);
4518 module_init(sep_init
);
4519 module_exit(sep_exit
);
4521 MODULE_LICENSE("GPL");