/*
 *
 *  sep_driver.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009,2010 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn		mark.a.allyn@intel.com
 *  Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14	Upgrade to Medfield
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/rar_register.h>

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
/*----------------------------------------
	DEFINES
-----------------------------------------*/

#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000

/*--------------------------------------------
	GLOBAL variables
--------------------------------------------*/

/* Keep this a single static object for now to keep the conversion easy */

static struct sep_device *sep_dev;

/**
 * sep_dump_message - dump the message that is pending
 * @sep: SEP device
 */
static void sep_dump_message(struct sep_device *sep)
{
	int count;
	u32 *p = sep->shared_addr;
	for (count = 0; count < 12 * 4; count += 4)
		dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
			count, *p++);
}
89
90 /**
91 * sep_map_and_alloc_shared_area - allocate shared block
92 * @sep: security processor
93 * @size: size of shared area
94 */
95 static int sep_map_and_alloc_shared_area(struct sep_device *sep)
96 {
97 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
98 sep->shared_size,
99 &sep->shared_bus, GFP_KERNEL);
100
101 if (!sep->shared_addr) {
102 dev_warn(&sep->pdev->dev,
103 "shared memory dma_alloc_coherent failed\n");
104 return -ENOMEM;
105 }
106 dev_dbg(&sep->pdev->dev,
107 "shared_addr %zx bytes @%p (bus %llx)\n",
108 sep->shared_size, sep->shared_addr,
109 (unsigned long long)sep->shared_bus);
110 return 0;
111 }

/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
				sep->shared_addr, sep->shared_bus);
}

/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
						dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}

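/*
 * Worked example of the translation above (illustrative only): the shared
 * area is one contiguous block, so a SEP-supplied bus address of
 * sep->shared_bus + 0x10 maps to the kernel pointer sep->shared_addr + 0x10.
 * sep_shared_area_virt_to_bus() further down performs the reverse mapping
 * with the same offset arithmetic.
 */
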
/**
 * sep_singleton_open - open method for the singleton interface
 * @inode_ptr: inode of SEP device
 * @file_ptr: file handle to SEP device
 *
 * Called when the user opens the singleton device interface. Only one
 * process may hold the singleton device open at a time.
 */
static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
{
	struct sep_device *sep;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;

	file_ptr->private_data = sep;

	if (test_and_set_bit(0, &sep->singleton_access_flag))
		return -EBUSY;
	return 0;
}

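/*
 * Minimal userspace sketch of the singleton semantics above (illustrative
 * only; the device node name is hypothetical and depends on how the misc
 * devices are registered elsewhere in this driver):
 *
 *	int fd = open("/dev/sep_singleton", O_RDWR);
 *	if (fd < 0 && errno == EBUSY)
 *		;	// another process already holds the singleton open
 *	...
 *	close(fd);	// sep_singleton_release() clears the access flag
 */
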
/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;
	filp->private_data = sep;

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}

/**
 * sep_singleton_release - close a SEP singleton device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device. As the open protects against
 * multiple simultaneous opens that means this method is called when the
 * final reference to the open handle is dropped.
 */
static int sep_singleton_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	clear_bit(0, &sep->singleton_access_flag);
	return 0;
}

/**
 * sep_request_daemon_open - request daemon open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP request daemon. Called when the
 * request daemon in userspace opens the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_request_daemon_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = sep_dev;
	int error = 0;

	filp->private_data = sep;

	/* There is supposed to be only one request daemon */
	if (test_and_set_bit(0, &sep->request_daemon_open))
		error = -EBUSY;
	return error;
}

/**
 * sep_request_daemon_release - close a SEP daemon
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP daemon.
 */
static int sep_request_daemon_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
		current->pid);

	/* Clear the request_daemon_open flag */
	clear_bit(0, &sep->request_daemon_open);
	return 0;
}

/**
 * sep_req_daemon_send_reply_command_handler - poke the SEP
 * @sep: struct sep_device *
 *
 * This function raises an interrupt to the SEP, signalling that the
 * host has a new command for it.
 */
static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
{
	unsigned long lck_flags;

	sep_dump_message(sep);

	/* Counters are lockable region */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	sep->send_ct++;
	sep->reply_ct++;

	/* Send the interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
	sep->send_ct++;

	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
		sep->send_ct, sep->reply_ct);

	return 0;
}

/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 *
 * Handles the request to free the DMA tables built for synchronous actions
 */
static int sep_free_dma_table_data_handler(struct sep_device *sep)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
		dma = &sep->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/* Unmap and free output map array */
		if (dma->out_map_array) {
			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Release the locked input pages */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Release the locked output pages, marking them dirty */
		if (dma->out_page_array) {
			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))
					SetPageDirty(dma->out_page_array[count]);
				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	sep->nr_dcb_creat = 0;
	sep->num_lli_tables_created = 0;

	return 0;
}

/**
 * sep_request_daemon_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called by the kernel when the daemon attempts an mmap() syscall
 * using our handle.
 */
static int sep_request_daemon_mmap(struct file *filp,
	struct vm_area_struct *vma)
{
	struct sep_device *sep = filp->private_data;
	dma_addr_t bus_address;
	int error = 0;

	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function;
	}

	/* Get physical address */
	bus_address = sep->shared_bus;

	if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {

		dev_warn(&sep->pdev->dev, "remap_pfn_range failed\n");
		error = -EAGAIN;
		goto end_function;
	}

end_function:
	return error;
}

/**
 * sep_request_daemon_poll - poll implementation
 * @sep: struct sep_device * for current SEP device
 * @filp: struct file * for open file
 * @wait: poll_table * for poll
 *
 * Called when our device is part of a poll() or select() syscall
 */
static unsigned int sep_request_daemon_poll(struct file *filp,
	poll_table *wait)
{
	u32 mask = 0;
	/* GPR2 register */
	u32 retval2;
	unsigned long lck_flags;
	struct sep_device *sep = filp->private_data;

	poll_wait(filp, &sep->event_request_daemon, wait);

	dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
		sep->send_ct, sep->reply_ct);

	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	/* Check if the data is ready */
	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"daemon poll: data check (GPR2) is %x\n", retval2);

		/* Check if PRINT request */
		if ((retval2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
			mask |= POLLIN;
			goto end_function;
		}
		/* Check if NVS request */
		if (retval2 >> 31) {
			dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
			mask |= POLLPRI | POLLWRNORM;
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		dev_dbg(&sep->pdev->dev,
			"daemon poll: no reply received; returning 0\n");
		mask = 0;
	}
end_function:
	return mask;
}

/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);

	mutex_lock(&sep->sep_mutex);
	/* Is this the process that has a transaction open?
	 * If so, let's reset pid_doing_transaction to 0 and
	 * clear the in use flags, and then wake up sep_event
	 * so that other processes can do transactions
	 */
	if (sep->pid_doing_transaction == current->pid) {
		clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
		clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
		sep_free_dma_table_data_handler(sep);
		wake_up(&sep->event);
		sep->pid_doing_transaction = 0;
	}

	mutex_unlock(&sep->sep_mutex);
	return 0;
}

/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	dma_addr_t bus_addr;
	struct sep_device *sep = filp->private_data;
	unsigned long error = 0;

	/* Set the transaction busy (own the device) */
	wait_event_interruptible(sep->event,
		test_and_set_bit(SEP_MMAP_LOCK_BIT,
		&sep->in_use_flags) == 0);

	if (signal_pending(current)) {
		error = -EINTR;
		goto end_function_with_error;
	}
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process which has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	mutex_lock(&sep->sep_mutex);
	sep->pid_doing_transaction = current->pid;
	mutex_unlock(&sep->sep_mutex);

	/* Zero the pools and the number of data pool allocation pointers */
	sep->data_pool_bytes_allocated = 0;
	sep->num_of_data_allocations = 0;

	/*
	 * Check that the size of the mapped range matches the size of the
	 * message shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_warn(&sep->pdev->dev, "remap_pfn_range failed\n");
		error = -EAGAIN;
		goto end_function_with_error;
	}
	goto end_function;

end_function_with_error:
	/* Clear the bit */
	clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
	mutex_lock(&sep->sep_mutex);
	sep->pid_doing_transaction = 0;
	mutex_unlock(&sep->sep_mutex);

	/* Raise event for stuck contexts */
	wake_up(&sep->event);

end_function:
	return error;
}

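/*
 * Illustrative userspace sketch of starting a transaction (not part of
 * this driver; the device node name is hypothetical and the message
 * layout is defined by sep_driver_api.h). A client first mmap()s the
 * shared area, which also takes transaction ownership as implemented
 * above, then writes its message into the mapping:
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	void *msg = mmap(NULL, SEP_DRIVER_MMMAP_AREA_SIZE,
 *			 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	// fill in the message words, then issue the send-command ioctl
 *	// and poll() the fd until the SEP replies
 */
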
/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lck_flags;

	struct sep_device *sep = filp->private_data;

	/* Am I the process that owns the transaction? */
	mutex_lock(&sep->sep_mutex);
	if (current->pid != sep->pid_doing_transaction) {
		dev_dbg(&sep->pdev->dev, "poll; wrong pid\n");
		mask = POLLERR;
		mutex_unlock(&sep->sep_mutex);
		goto end_function;
	}
	mutex_unlock(&sep->sep_mutex);

	/* Check if send command or send_reply were activated previously */
	if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
		mask = POLLERR;
		goto end_function;
	}

	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");

	poll_wait(filp, &sep->event, wait);

	dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
		sep->send_ct, sep->reply_ct);

	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if (retval2 != 0x0) {
		dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
			retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
			wake_up(&sep->event_request_daemon);
			goto end_function;
		}

		/* Check if this is a SEP reply or a request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
			wake_up(&sep->event_request_daemon);
		} else {
			dev_dbg(&sep->pdev->dev, "poll: normal return\n");
			/* In case it is set again by send_reply_command */
			clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"poll; SEP reply POLLIN | POLLRDNORM\n");
			mask |= POLLIN | POLLRDNORM;
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		dev_dbg(&sep->pdev->dev,
			"poll; no reply received; returning mask of 0\n");
		mask = 0;
	}

end_function:
	return mask;
}

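/*
 * For reference (derived from the checks above, not from a datasheet):
 * the poll logic treats the SEP-to-host GPR2 register as
 *
 *	bit 31	set: message is a SEP request for the daemon
 *		clear: message is a reply to the host
 *	bit 30	set: SEP "printf" request for the daemon
 *
 * so a value such as 0x40000000 is routed to the request daemon as a
 * print request, while a value with bits 31:30 clear is reported to the
 * transaction owner as POLLIN | POLLRDNORM.
 */
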
/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}

/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */

	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
	dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
	dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);

	return time.tv_sec;
}

/**
 * sep_set_caller_id_handler - insert caller id entry
 * @sep: SEP device
 * @arg: pointer to struct caller_id_struct
 *
 * Inserts the data into the caller id table. Note that this function
 * falls under the ioctl lock
 */
static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
{
	void __user *hash;
	int error = 0;
	int i;
	struct caller_id_struct command_args;

	for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
		if (sep->caller_id_table[i].pid == 0)
			break;
	}

	if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
		dev_dbg(&sep->pdev->dev, "no more caller id entries left\n");
		dev_dbg(&sep->pdev->dev, "maximum number is %d\n",
			SEP_CALLER_ID_TABLE_NUM_ENTRIES);
		error = -EUSERS;
		goto end_function;
	}

	/* Copy the data */
	if (copy_from_user(&command_args, (void __user *)arg,
		sizeof(command_args))) {
		error = -EFAULT;
		goto end_function;
	}

	hash = (void __user *)(unsigned long)command_args.callerIdAddress;

	if (!command_args.pid || !command_args.callerIdSizeInBytes) {
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
	dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
		command_args.callerIdSizeInBytes);

	if (command_args.callerIdSizeInBytes >
					SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
		error = -EMSGSIZE;
		goto end_function;
	}

	sep->caller_id_table[i].pid = command_args.pid;

	if (copy_from_user(sep->caller_id_table[i].callerIdHash,
		hash, command_args.callerIdSizeInBytes))
		error = -EFAULT;
end_function:
	return error;
}

/**
 * sep_set_current_caller_id - set the caller id
 * @sep: pointer to struct_sep_device
 *
 * Set the caller ID (if it exists) to the SEP. Note that this
 * function falls under the ioctl lock
 */
static int sep_set_current_caller_id(struct sep_device *sep)
{
	int i;
	u32 *hash_buf_ptr;

	/* Zero the previous value */
	memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
					0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);

	for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
		if (sep->caller_id_table[i].pid == current->pid) {
			dev_dbg(&sep->pdev->dev, "Caller Id found\n");

			memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
				(void *)(sep->caller_id_table[i].callerIdHash),
				SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
			break;
		}
	}
	/*
	 * Ensure data is in little endian. Note the offset is in bytes, so
	 * it must be applied before the cast to u32 *; casting first and
	 * then adding the byte offset would scale it by sizeof(u32) and
	 * point past the caller id area.
	 */
	hash_buf_ptr = (u32 *)(sep->shared_addr +
					SEP_CALLER_ID_OFFSET_BYTES);

	for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
		hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);

	return 0;
}

/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises an interrupt to the SEP that signals that it
 * has a new command from the host
 *
 * Note that this function does fall under the ioctl lock
 */
static int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lck_flags;
	int error = 0;

	if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep_set_time(sep);

	sep_set_current_caller_id(sep);

	sep_dump_message(sep);

	/* Update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

	dev_dbg(&sep->pdev->dev,
		"sep_send_command_handler send_ct %lx reply_ct %lx\n",
		sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	return error;
}

/**
 * sep_allocate_data_pool_memory_handler - allocate pool memory
 * @sep: pointer to struct sep_device
 * @arg: pointer to struct alloc_struct
 *
 * This function handles the allocate data pool memory request.
 * It calculates the bus address of the allocated memory and the
 * offset of this area from the mapped address.
 * Therefore, the FVOs in user space can calculate the exact virtual
 * address of this allocated memory
 */
static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
	unsigned long arg)
{
	int error = 0;
	struct alloc_struct command_args;

	/* Holds the allocated buffer address in the system memory pool */
	u32 *token_addr;

	if (copy_from_user(&command_args, (void __user *)arg,
					sizeof(struct alloc_struct))) {
		error = -EFAULT;
		goto end_function;
	}

	/* Check that there is enough memory left in the pool */
	if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
		error = -ENOMEM;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev,
		"data pool bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
	dev_dbg(&sep->pdev->dev,
		"offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
	/* Set the virtual and bus address */
	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
		sep->data_pool_bytes_allocated;

	/* Place in the shared area that is known by the SEP */
	token_addr = (u32 *)(sep->shared_addr +
		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
		(sep->num_of_data_allocations)*2*sizeof(u32));

	token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
	token_addr[1] = (u32)sep->shared_bus +
		SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
		sep->data_pool_bytes_allocated;

	/* Write the memory back to the user space */
	error = copy_to_user((void __user *)arg, (void *)&command_args,
		sizeof(struct alloc_struct));
	if (error) {
		error = -EFAULT;
		goto end_function;
	}

	/* Update the allocation */
	sep->data_pool_bytes_allocated += command_args.num_bytes;
	sep->num_of_data_allocations += 1;

end_function:
	return error;
}

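/*
 * Worked example of the bookkeeping above (illustrative numbers): if no
 * allocations have been made yet and userspace asks for 256 bytes, the
 * returned offset is SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + 0, a
 * {SEP_DATA_POOL_POINTERS_VAL_TOKEN, shared_bus + offset} pair is written
 * into allocation-token slot 0, and the next request would start at byte
 * 256 of the pool and use slot 1. Userspace can then resolve the buffer as
 *
 *	void *buf = mapped_shared_area_base + command_args.offset;
 */
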
/**
 * sep_lock_kernel_pages - map kernel pages for DMA
 * @sep: pointer to struct sep_device
 * @kernel_virt_addr: address of data buffer in kernel
 * @data_size: size of data
 * @lli_array_ptr: lli array
 * @in_out_flag: input into device or output from device
 *
 * This function locks all the physical pages of the kernel virtual buffer
 * and constructs a basic lli array, where each entry holds the physical
 * page address and the size that application data holds in this page.
 * This function is used only during kernel crypto module calls from within
 * the kernel (when ioctl is not used)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
	unsigned long kernel_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag)
{
	int error = 0;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	dev_dbg(&sep->pdev->dev, "lock kernel pages kernel_virt_addr is %08lx\n",
		(unsigned long)kernel_virt_addr);
	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);

	lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
	if (!lli_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
	if (!map_array) {
		error = -ENOMEM;
		goto end_function_with_error;
	}

	map_array[0].dma_addr =
		dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
			data_size, DMA_BIDIRECTIONAL);
	map_array[0].size = data_size;

	/*
	 * Set the start address of the first page - app data may start not at
	 * the beginning of the page
	 */
	lli_array[0].bus_address = (u32)map_array[0].dma_addr;
	lli_array[0].block_size = map_array[0].size;

	dev_dbg(&sep->pdev->dev,
		"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Set the output parameters */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
		sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
	} else {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
		sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
	}
	goto end_function;

end_function_with_error:
	kfree(lli_array);

end_function:
	return error;
}

/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic lli array, where each entry
 * holds the physical page address and the size that application
 * data holds in this physical page
 */
static int sep_lock_user_pages(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag)
{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;
	/* Direction of the DMA mapping for locked pages */
	enum dma_data_direction dir;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev, "lock user pages app_virt_addr is %x\n", app_virt_addr);
	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
	dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
	dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
	dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);

	/* Allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
	if (!map_array) {
		dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
		GFP_ATOMIC);

	if (!lli_array) {
		dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	/* Convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr,
		num_pages,
		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
		0, page_array, NULL);

	up_read(&current->mm->mmap_sem);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			"not all pages locked by get_user_pages\n");
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");

	/* Set direction */
	if (in_out_flag == SEP_DRIVER_IN_FLAG)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
			0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_warn(&sep->pdev->dev,
			"lli_array[%x].bus_address is "
			"%08lx, lli_array[%x].block_size is %x\n",
			num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
		sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
			num_pages;
	} else {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
		sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
			page_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
			num_pages;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	return error;
}

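/*
 * Worked example of the first/last page arithmetic above (illustrative,
 * assuming 4 KB pages): for app_virt_addr = 0x10000F00 and data_size =
 * 0x2200, the buffer spans pages 0x10000 through 0x10003 (num_pages = 4).
 * The first entry is advanced by the in-page offset 0xF00 and shortened
 * to PAGE_SIZE - 0xF00 = 0x100 bytes; the middle entries each cover a
 * full PAGE_SIZE; the last entry is trimmed to
 * (0x10000F00 + 0x2200) & ~PAGE_MASK = 0x100 bytes, so the blocks sum to
 * exactly data_size.
 */
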
/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr: pointer to the lli array
 * @num_array_entries: number of entries in the lli array
 * @last_table_flag: set if this turns out to be the last table
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table until we fill the whole
	 * table or until the data has ended
	 */
	for (counter = 0;
		(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
			(counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries are left or if the data size exceeds the
	 * DMA restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than the DMA restriction.
	 * If it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA
	 */
	if (next_table_data_size &&
		next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
			next_table_data_size);

end_function:
	return table_data_size;
}

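/*
 * Worked example (illustrative numbers, assuming room for 3 data entries
 * per table, i.e. SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1 == 3, and
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE == 0x80): an array of block sizes
 * {0x100, 0x100, 0x100, 0x40} first yields table_data_size = 0x300 with
 * 0x40 left over. Since the remaining 0x40 is below the DMA minimum, the
 * current table is shrunk by 0x80 - 0x40 = 0x40 to 0x2c0, leaving 0x80
 * for the next (last) table.
 */
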
/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry *lli_array_ptr,
	struct sep_lli_entry *lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev, "build lli table table_data_size is %x\n", table_data_size);

	/* Fill the table until table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
			lli_table_ptr);
		dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"curr_table_data_size too large\n");

			/* Update the size of block in the table */
			lli_table_ptr->block_size -=
				cpu_to_le32((curr_table_data_size - table_data_size));

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"lli_table_ptr->bus_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;

}

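/*
 * Layout note (derived from the code above and from
 * sep_debug_print_lli_tables() below): every table ends with an "info"
 * entry whose block_size packs the entry count and the data size of the
 * next table as
 *
 *	block_size = (num_entries << 24) | (table_data_size & 0xffffff);
 *
 * while its bus_address points at the next chained table, or holds the
 * 0xffffffff terminator written here for the final table. For example,
 * an info entry with block_size 0x05000800 describes a next table of 5
 * entries covering 0x800 bytes.
 */
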
/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the bus address inside the shared area according
 * to the virtual address. It can be either on the external RAM device
 * (ioremapped), or on the system RAM.
 * This implementation is for the external RAM
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
	dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
		(unsigned long)
		sep->shared_bus + (virt_address - sep->shared_addr));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}

/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside the shared area
 * according to the bus address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM.
 * This implementation is for the external RAM
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
		(unsigned long)bus_address, (unsigned long)(sep->shared_addr +
		(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}

/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");

	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"lli table %08lx, table_data_size is %lu\n",
			table_count, table_data_size);
		dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
			num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"lli_table_ptr address is %08lx\n",
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"phys address is %08lx block size is %x\n",
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}
		/* Point to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"phys lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"phys lli_table_ptr->physical_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);

		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"phys table_data_size is %lu num_table_entries is"
			" %lu bus_address is %lu\n", table_data_size,
			num_table_entries, (unsigned long)lli_table_ptr->bus_address);

		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
}

/**
 * sep_prepare_empty_lli_table - create a blank LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_addr_ptr: pointer to lli table
 * @num_entries_ptr: pointer to number of entries
 * @table_data_size_ptr: pointer to table data size
 *
 * This function creates empty lli tables when there is no data
 */
static void sep_prepare_empty_lli_table(struct sep_device *sep,
		dma_addr_t *lli_table_addr_ptr,
		u32 *num_entries_ptr,
		u32 *table_data_size_ptr)
{
	struct sep_lli_entry *lli_table_ptr;

	/* Find the area for the new table */
	lli_table_ptr =
		(struct sep_lli_entry *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	lli_table_ptr->bus_address = 0;
	lli_table_ptr->block_size = 0;

	lli_table_ptr++;
	lli_table_ptr->bus_address = 0xFFFFFFFF;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter value */
	*lli_table_addr_ptr = sep->shared_bus +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created *
		sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

	/* Set the num of entries and table data size for empty table */
	*num_entries_ptr = 2;
	*table_data_size_ptr = 0;

	/* Update the number of created tables */
	sep->num_lli_tables_created++;
}

/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @app_virt_addr: virtual address of the data buffer
 * @data_size: size of the data
 * @block_size: block size of the operation
 * @lli_table_ptr: returned bus address of the first lli table
 * @num_entries_ptr: returned number of entries in the first table
 * @table_data_size_ptr: returned data size of the first table
 * @is_kva: set for kernel data (kernel crypto call)
 *
 * This function prepares only an input DMA table for synchronous
 * symmetric operations (HASH).
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
	unsigned long app_virt_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva)
{
	int error = 0;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_entry_ptr;
	/* Array of lli entries for the locked pages */
	struct sep_lli_entry *lli_array_ptr;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* Num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* Lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* The total data in one table */
	u32 table_data_size = 0;
	/* Flag for last table */
	u32 last_table_flag = 0;
	/* Number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* Next table address */
	void *lli_table_alloc_addr = 0;

	dev_dbg(&sep->pdev->dev, "prepare input dma table data_size is %x\n", data_size);
	dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);

	/* Initialize the pages pointers */
	sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
	sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;

	/* Set the kernel address for first table to be allocated */
	lli_table_alloc_addr = (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (data_size == 0) {
		/* Special case - create an empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
				num_entries_ptr, table_data_size_ptr);
		goto update_dcb_counter;
	}

	/* Check if the pages are in Kernel Virtual Address layout */
	if (is_kva == true)
		/* Lock the pages in the kernel */
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
	else
		/*
		 * Lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);

	current_entry = 0;
	info_entry_ptr = NULL;

	sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;

	/* Loop until all the entries in the in array are processed */
	while (current_entry < sep_lli_entries) {

		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		if (lli_table_alloc_addr >
			((void *)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;

		}

		/* Update the number of created tables */
		sep->num_lli_tables_created++;

		/* Calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
			table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == NULL) {

			/* Set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"output lli_table_in_ptr is %08lx\n",
				(unsigned long)*lli_table_ptr);

		} else {
			/* Update the info entry of the previous in table */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
					in_lli_table_ptr);
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
		}
		/* Save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}
	/* Print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
		sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
		*num_entries_ptr, *table_data_size_ptr);
	/* The array of the pages */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* Update DCB counter */
	sep->nr_dcb_creat++;
	goto end_function;

end_function_error:
	/* Free all the allocated resources */
	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
	kfree(lli_array_ptr);
	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);

end_function:
	return error;

}

1641 /**
1642 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
1643 * @sep: pointer to struct sep_device
1644 * @lli_in_array:
1645 * @sep_in_lli_entries:
1646 * @lli_out_array:
1647 * @sep_out_lli_entries
1648 * @block_size
1649 * @lli_table_in_ptr
1650 * @lli_table_out_ptr
1651 * @in_num_entries_ptr
1652 * @out_num_entries_ptr
1653 * @table_data_size_ptr
1654 *
1655 * This function creates the input and output DMA tables for
1656 * symmetric operations (AES/DES) according to the block
1657 * size from LLI arays
1658 * Note that all bus addresses that are passed to the SEP
1659 * are in 32 bit format; the SEP is a 32 bit device
1660 */
1661 static int sep_construct_dma_tables_from_lli(
1662 struct sep_device *sep,
1663 struct sep_lli_entry *lli_in_array,
1664 u32 sep_in_lli_entries,
1665 struct sep_lli_entry *lli_out_array,
1666 u32 sep_out_lli_entries,
1667 u32 block_size,
1668 dma_addr_t *lli_table_in_ptr,
1669 dma_addr_t *lli_table_out_ptr,
1670 u32 *in_num_entries_ptr,
1671 u32 *out_num_entries_ptr,
1672 u32 *table_data_size_ptr)
1673 {
1674 /* Points to the area where next lli table can be allocated */
1675 void *lli_table_alloc_addr = 0;
1676 /* Input lli table */
1677 struct sep_lli_entry *in_lli_table_ptr = NULL;
1678 /* Output lli table */
1679 struct sep_lli_entry *out_lli_table_ptr = NULL;
1680 /* Pointer to the info entry of the table - the last entry */
1681 struct sep_lli_entry *info_in_entry_ptr = NULL;
1682 /* Pointer to the info entry of the table - the last entry */
1683 struct sep_lli_entry *info_out_entry_ptr = NULL;
1684 /* Points to the first entry to be processed in the lli_in_array */
1685 u32 current_in_entry = 0;
1686 /* Points to the first entry to be processed in the lli_out_array */
1687 u32 current_out_entry = 0;
1688 /* Max size of the input table */
1689 u32 in_table_data_size = 0;
1690 /* Max size of the output table */
1691 u32 out_table_data_size = 0;
1692 /* Flag te signifies if this is the last tables build */
1693 u32 last_table_flag = 0;
1694 /* The data size that should be in table */
1695 u32 table_data_size = 0;
1696 /* Number of etnries in the input table */
1697 u32 num_entries_in_table = 0;
1698 /* Number of etnries in the output table */
1699 u32 num_entries_out_table = 0;
1700
1701 /* Initiate to point after the message area */
1702 lli_table_alloc_addr = (void *)(sep->shared_addr +
1703 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1704 (sep->num_lli_tables_created *
1705 (sizeof(struct sep_lli_entry) *
1706 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
1707
1708 /* Loop till all the entries in in array are not processed */
1709 while (current_in_entry < sep_in_lli_entries) {
1710 /* Set the new input and output tables */
1711 in_lli_table_ptr =
1712 (struct sep_lli_entry *)lli_table_alloc_addr;
1713
1714 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1715 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1716
1717 /* Set the first output tables */
1718 out_lli_table_ptr =
1719 (struct sep_lli_entry *)lli_table_alloc_addr;
1720
1721 /* Check if the DMA table area limit was overrun */
1722 if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
1723 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
1724 ((void *)sep->shared_addr +
1725 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1726 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1727
1728 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
1729 return -ENOMEM;
1730 }
1731
1732 /* Update the number of the lli tables created */
1733 sep->num_lli_tables_created += 2;
1734
1735 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1736 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1737
1738 /* Calculate the maximum size of data for input table */
1739 in_table_data_size =
1740 sep_calculate_lli_table_max_size(sep,
1741 &lli_in_array[current_in_entry],
1742 (sep_in_lli_entries - current_in_entry),
1743 &last_table_flag);
1744
1745 /* Calculate the maximum size of data for output table */
1746 out_table_data_size =
1747 sep_calculate_lli_table_max_size(sep,
1748 &lli_out_array[current_out_entry],
1749 (sep_out_lli_entries - current_out_entry),
1750 &last_table_flag);
1751
1752 dev_dbg(&sep->pdev->dev,
1753 "construct tables from lli in_table_data_size is %x\n",
1754 in_table_data_size);
1755
1756 dev_dbg(&sep->pdev->dev,
1757 "construct tables from lli out_table_data_size is %x\n",
1758 out_table_data_size);
1759
1760 table_data_size = in_table_data_size;
1761
1762 if (!last_table_flag) {
1763 /*
1764 * If this is not the last table,
1765 * then must check where the data is smallest
1766 * and then align it to the block size
1767 */
1768 if (table_data_size > out_table_data_size)
1769 table_data_size = out_table_data_size;
1770
1771 /*
1772 * Now calculate the table size so that
1773 * it will be module block size
1774 */
1775 table_data_size = (table_data_size / block_size) *
1776 block_size;
1777 }
1778
1779 /* Construct input lli table */
1780 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
1781 in_lli_table_ptr,
1782 &current_in_entry,
1783 &num_entries_in_table,
1784 table_data_size);
1785
1786 /* Construct output lli table */
1787 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
1788 out_lli_table_ptr,
1789 &current_out_entry,
1790 &num_entries_out_table,
1791 table_data_size);
1792
1793 /* If info entry is null - this is the first table built */
1794 if (info_in_entry_ptr == NULL) {
1795 /* Set the output parameters to physical addresses */
1796 *lli_table_in_ptr =
1797 sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1798
1799 *in_num_entries_ptr = num_entries_in_table;
1800
1801 *lli_table_out_ptr =
1802 sep_shared_area_virt_to_bus(sep,
1803 out_lli_table_ptr);
1804
1805 *out_num_entries_ptr = num_entries_out_table;
1806 *table_data_size_ptr = table_data_size;
1807
1808 dev_dbg(&sep->pdev->dev,
1809 "output lli_table_in_ptr is %08lx\n",
1810 (unsigned long)*lli_table_in_ptr);
1811 dev_dbg(&sep->pdev->dev,
1812 "output lli_table_out_ptr is %08lx\n",
1813 (unsigned long)*lli_table_out_ptr);
1814 } else {
1815 /* Update the info entry of the previous in table */
1816 info_in_entry_ptr->bus_address =
1817 sep_shared_area_virt_to_bus(sep,
1818 in_lli_table_ptr);
1819
1820 info_in_entry_ptr->block_size =
1821 ((num_entries_in_table) << 24) |
1822 (table_data_size);
1823
1824 			/* Update the info entry of the previous out table */
1825 info_out_entry_ptr->bus_address =
1826 sep_shared_area_virt_to_bus(sep,
1827 out_lli_table_ptr);
1828
1829 info_out_entry_ptr->block_size =
1830 ((num_entries_out_table) << 24) |
1831 (table_data_size);
1832
1833 dev_dbg(&sep->pdev->dev,
1834 "output lli_table_in_ptr:%08lx %08x\n",
1835 (unsigned long)info_in_entry_ptr->bus_address,
1836 info_in_entry_ptr->block_size);
1837
1838 dev_dbg(&sep->pdev->dev,
1839 "output lli_table_out_ptr:%08lx %08x\n",
1840 (unsigned long)info_out_entry_ptr->bus_address,
1841 info_out_entry_ptr->block_size);
1842 }
1843
1844 /* Save the pointer to the info entry of the current tables */
1845 info_in_entry_ptr = in_lli_table_ptr +
1846 num_entries_in_table - 1;
1847 info_out_entry_ptr = out_lli_table_ptr +
1848 num_entries_out_table - 1;
1849
1850 dev_dbg(&sep->pdev->dev,
1851 "output num_entries_out_table is %x\n",
1852 (u32)num_entries_out_table);
1853 dev_dbg(&sep->pdev->dev,
1854 "output info_in_entry_ptr is %lx\n",
1855 (unsigned long)info_in_entry_ptr);
1856 dev_dbg(&sep->pdev->dev,
1857 "output info_out_entry_ptr is %lx\n",
1858 (unsigned long)info_out_entry_ptr);
1859 }
1860
1861 /* Print input tables */
1862 sep_debug_print_lli_tables(sep,
1863 (struct sep_lli_entry *)
1864 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
1865 *in_num_entries_ptr,
1866 *table_data_size_ptr);
1867
1868 /* Print output tables */
1869 sep_debug_print_lli_tables(sep,
1870 (struct sep_lli_entry *)
1871 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
1872 *out_num_entries_ptr,
1873 *table_data_size_ptr);
1874
1875 return 0;
1876 }
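
/*
 * Editor's sketch, not part of the driver: the loop above chains each
 * pair of LLI tables to the next through their final "info" entries.
 * An info entry carries the bus address of the next table, and its
 * block_size field packs that table's entry count into bits 31:24 and
 * its data size into bits 23:0. The table size itself is the smaller
 * of the input and output capacities, truncated to whole blocks except
 * for the last table. The helper names below are the editor's, for
 * illustration only.
 */
static inline u32 sep_pack_info_block(u32 num_entries, u32 data_size)
{
	return (num_entries << 24) | (data_size & 0x00ffffff);
}

static inline u32 sep_clamp_table_size(u32 in_size, u32 out_size,
					u32 block_size, u32 last_table)
{
	u32 size = in_size;

	if (!last_table) {
		if (size > out_size)
			size = out_size;
		size -= size % block_size;	/* whole blocks only */
	}
	return size;
}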
1877
1878 /**
1879 * sep_prepare_input_output_dma_table - prepare DMA I/O table
1880  * @app_virt_in_addr: virtual address of the input data buffer
1881  * @app_virt_out_addr: virtual address of the output data buffer
1882  * @data_size: size of the data to be processed
1883  * @block_size: block size of the operation
1884  * @lli_table_in_ptr: returns the bus address of the first input LLI table
1885  * @lli_table_out_ptr: returns the bus address of the first output LLI table
1886  * @in_num_entries_ptr: returns the number of entries in the first input table
1887  * @out_num_entries_ptr: returns the number of entries in the first output table
1888  * @table_data_size_ptr: returns the data size covered by the first table pair
1889  * @is_kva: set for kernel data; used only for kernel crypto module
1890  *
1891  * This function builds input and output DMA tables for synchronous
1892  * symmetric operations (AES, DES, HASH). It also ensures that each table
1893  * covers a whole multiple of the block size.
1894 * Note that all bus addresses that are passed to the SEP
1895 * are in 32 bit format; the SEP is a 32 bit device
1896 */
1897 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1898 unsigned long app_virt_in_addr,
1899 unsigned long app_virt_out_addr,
1900 u32 data_size,
1901 u32 block_size,
1902 dma_addr_t *lli_table_in_ptr,
1903 dma_addr_t *lli_table_out_ptr,
1904 u32 *in_num_entries_ptr,
1905 u32 *out_num_entries_ptr,
1906 u32 *table_data_size_ptr,
1907 bool is_kva)
1908
1909 {
1910 int error = 0;
1911 	/* Array of lli entries for the input pages */
1912 	struct sep_lli_entry *lli_in_array;
1913 	/* Array of lli entries for the output pages */
1914 	struct sep_lli_entry *lli_out_array;
1915
1916 if (data_size == 0) {
1917 /* Prepare empty table for input and output */
1918 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
1919 in_num_entries_ptr, table_data_size_ptr);
1920
1921 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
1922 out_num_entries_ptr, table_data_size_ptr);
1923
1924 goto update_dcb_counter;
1925 }
1926
1927 /* Initialize the pages pointers */
1928 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
1929 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
1930
1931 /* Lock the pages of the buffer and translate them to pages */
1932 if (is_kva == true) {
1933 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
1934 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
1935
1936 if (error) {
1937 dev_warn(&sep->pdev->dev,
1938 "lock kernel for in failed\n");
1939 goto end_function;
1940 }
1941
1942 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
1943 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
1944
1945 if (error) {
1946 dev_warn(&sep->pdev->dev,
1947 "lock kernel for out failed\n");
1948 goto end_function;
1949 }
1950 	} else {
1953 error = sep_lock_user_pages(sep, app_virt_in_addr,
1954 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
1955 if (error) {
1956 dev_warn(&sep->pdev->dev,
1957 "sep_lock_user_pages for input virtual buffer failed\n");
1958 goto end_function;
1959 }
1960
1961 error = sep_lock_user_pages(sep, app_virt_out_addr,
1962 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
1963
1964 if (error) {
1965 dev_warn(&sep->pdev->dev,
1966 "sep_lock_user_pages for output virtual buffer failed\n");
1967 goto end_function_free_lli_in;
1968 }
1969 }
1970
1971 dev_dbg(&sep->pdev->dev, "prep input output dma table sep_in_num_pages is %x\n",
1972 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
1973 dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
1974 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
1975 dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
1976 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1977
1978 /* Call the function that creates table from the lli arrays */
1979 error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
1980 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
1981 lli_out_array,
1982 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
1983 block_size, lli_table_in_ptr, lli_table_out_ptr,
1984 in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1985
1986 if (error) {
1987 dev_warn(&sep->pdev->dev,
1988 "sep_construct_dma_tables_from_lli failed\n");
1989 goto end_function_with_error;
1990 }
1991
1992 kfree(lli_out_array);
1993 kfree(lli_in_array);
1994
1995 update_dcb_counter:
1996 /* Update DCB counter */
1997 sep->nr_dcb_creat++;
1998
1999 goto end_function;
2000
2001 end_function_with_error:
2002 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
2003 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
2004 kfree(lli_out_array);
2005 
2007 end_function_free_lli_in:
2008 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
2009 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
2010 kfree(lli_in_array);
2011
2012 end_function:
2013
2014 return error;
2015
2016 }
2017
2018 /**
2019 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2020 * @app_in_address: unsigned long; for data buffer in (user space)
2021 * @app_out_address: unsigned long; for data buffer out (user space)
2022 * @data_in_size: u32; for size of data
2023 * @block_size: u32; for block size
2024 * @tail_block_size: u32; for size of tail block
2025 * @isapplet: bool; to indicate external app
2026 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2027 *
2028 * This function prepares the linked DMA tables and puts the
2029  * address of the linked list of tables into a DCB (data control
2030  * block), the address of which is known by the SEP hardware.
2031 * Note that all bus addresses that are passed to the SEP
2032 * are in 32 bit format; the SEP is a 32 bit device
2033 */
2034 static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2035 unsigned long app_in_address,
2036 unsigned long app_out_address,
2037 u32 data_in_size,
2038 u32 block_size,
2039 u32 tail_block_size,
2040 bool isapplet,
2041 bool is_kva)
2042 {
2043 int error = 0;
2044 /* Size of tail */
2045 u32 tail_size = 0;
2046 /* Address of the created DCB table */
2047 struct sep_dcblock *dcb_table_ptr = NULL;
2048 /* The physical address of the first input DMA table */
2049 dma_addr_t in_first_mlli_address = 0;
2050 /* Number of entries in the first input DMA table */
2051 u32 in_first_num_entries = 0;
2052 /* The physical address of the first output DMA table */
2053 dma_addr_t out_first_mlli_address = 0;
2054 /* Number of entries in the first output DMA table */
2055 u32 out_first_num_entries = 0;
2056 /* Data in the first input/output table */
2057 u32 first_data_size = 0;
2058
2059 if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2060 /* No more DCBs to allocate */
2061 dev_warn(&sep->pdev->dev, "no more DCBs available\n");
2062 error = -ENOSPC;
2063 goto end_function;
2064 }
2065
2066 /* Allocate new DCB */
2067 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2068 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2069 (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
2070
2071 /* Set the default values in the DCB */
2072 dcb_table_ptr->input_mlli_address = 0;
2073 dcb_table_ptr->input_mlli_num_entries = 0;
2074 dcb_table_ptr->input_mlli_data_size = 0;
2075 dcb_table_ptr->output_mlli_address = 0;
2076 dcb_table_ptr->output_mlli_num_entries = 0;
2077 dcb_table_ptr->output_mlli_data_size = 0;
2078 dcb_table_ptr->tail_data_size = 0;
2079 dcb_table_ptr->out_vr_tail_pt = 0;
2080
2081 if (isapplet == true) {
2082
2083 /* Check if there is enough data for DMA operation */
2084 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2085 if (is_kva == true) {
2086 memcpy(dcb_table_ptr->tail_data,
2087 (void *)app_in_address, data_in_size);
2088 } else {
2089 if (copy_from_user(dcb_table_ptr->tail_data,
2090 (void __user *)app_in_address,
2091 data_in_size)) {
2092 error = -EFAULT;
2093 goto end_function;
2094 }
2095 }
2096
2097 dcb_table_ptr->tail_data_size = data_in_size;
2098
2099 /* Set the output user-space address for mem2mem op */
2100 if (app_out_address)
2101 dcb_table_ptr->out_vr_tail_pt =
2102 (aligned_u64)app_out_address;
2103
2104 /*
2105 * Update both data length parameters in order to avoid
2106 * second data copy and allow building of empty mlli
2107 * tables
2108 */
2109 tail_size = 0x0;
2110 data_in_size = 0x0;
2111
2112 } else {
2113 if (!app_out_address) {
2114 tail_size = data_in_size % block_size;
2115 if (!tail_size) {
2116 if (tail_block_size == block_size)
2117 tail_size = block_size;
2118 }
2119 } else {
2120 tail_size = 0;
2121 }
2122 }
2123 if (tail_size) {
2124 if (is_kva == true) {
2125 memcpy(dcb_table_ptr->tail_data,
2126 (void *)(app_in_address + data_in_size -
2127 tail_size), tail_size);
2128 } else {
2129 /* We have tail data - copy it to DCB */
2130 if (copy_from_user(dcb_table_ptr->tail_data,
2131 				    (void __user *)(app_in_address +
2132 data_in_size - tail_size), tail_size)) {
2133 error = -EFAULT;
2134 goto end_function;
2135 }
2136 }
2137 if (app_out_address)
2138 /*
2139 * Calculate the output address
2140 * according to tail data size
2141 */
2142 dcb_table_ptr->out_vr_tail_pt =
2143 (aligned_u64)app_out_address + data_in_size
2144 - tail_size;
2145
2146 /* Save the real tail data size */
2147 dcb_table_ptr->tail_data_size = tail_size;
2148 /*
2149 * Update the data size without the tail
2150 * data size AKA data for the dma
2151 */
2152 data_in_size = (data_in_size - tail_size);
2153 }
2154 }
2155 /* Check if we need to build only input table or input/output */
2156 if (app_out_address) {
2157 /* Prepare input/output tables */
2158 error = sep_prepare_input_output_dma_table(sep,
2159 app_in_address,
2160 app_out_address,
2161 data_in_size,
2162 block_size,
2163 &in_first_mlli_address,
2164 &out_first_mlli_address,
2165 &in_first_num_entries,
2166 &out_first_num_entries,
2167 &first_data_size,
2168 is_kva);
2169 } else {
2170 /* Prepare input tables */
2171 error = sep_prepare_input_dma_table(sep,
2172 app_in_address,
2173 data_in_size,
2174 block_size,
2175 &in_first_mlli_address,
2176 &in_first_num_entries,
2177 &first_data_size,
2178 is_kva);
2179 }
2180
2181 if (error) {
2182 dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
2183 goto end_function;
2184 }
2185
2186 /* Set the DCB values */
2187 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2188 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2189 dcb_table_ptr->input_mlli_data_size = first_data_size;
2190 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2191 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2192 dcb_table_ptr->output_mlli_data_size = first_data_size;
2193
2194 end_function:
2195 return error;
2196
2197 }
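
/*
 * Editor's sketch, illustration only: how the DCB preparation above
 * splits a buffer between DMA and CPU-copied tail data. Buffers smaller
 * than SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE are copied whole into the DCB;
 * otherwise, with no output buffer, the remainder modulo the block size
 * becomes the tail, and a buffer ending exactly on a block boundary
 * still donates one full block when tail_block_size equals block_size.
 * The helper name is the editor's, not a driver API.
 */
static inline u32 sep_tail_split(u32 data_in_size, u32 block_size,
				u32 tail_block_size, bool have_out_buf)
{
	u32 tail_size = 0;

	if (!have_out_buf) {
		tail_size = data_in_size % block_size;
		if (!tail_size && tail_block_size == block_size)
			tail_size = block_size;
	}
	return tail_size;
}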
2198
2199 /**
2200 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2201 * @sep: pointer to struct sep_device
2202 * @isapplet: indicates external application (used for kernel access)
2203 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2204 *
2205 * This function frees the DMA tables and DCB
2206 */
2207 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2208 bool is_kva)
2209 {
2210 int i = 0;
2211 int error = 0;
2212 int error_temp = 0;
2213 struct sep_dcblock *dcb_table_ptr;
2214 unsigned long pt_hold;
2215 void *tail_pt;
2216
2217 if (isapplet == true) {
2218 /* Set pointer to first DCB table */
2219 dcb_table_ptr = (struct sep_dcblock *)
2220 (sep->shared_addr +
2221 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2222
2223 /* Go over each DCB and see if tail pointer must be updated */
2224 for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
2225 if (dcb_table_ptr->out_vr_tail_pt) {
2226 pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
2227 tail_pt = (void *)pt_hold;
2228 if (is_kva == true) {
2229 memcpy(tail_pt,
2230 dcb_table_ptr->tail_data,
2231 dcb_table_ptr->tail_data_size);
2232 } else {
2233 error_temp = copy_to_user(
2234 						(void __user *)tail_pt,
2235 dcb_table_ptr->tail_data,
2236 dcb_table_ptr->tail_data_size);
2237 }
2238 if (error_temp) {
2239 					/* Copy to user space failed */
2240 error = -EFAULT;
2241 break;
2242 }
2243 }
2244 }
2245 }
2246 /* Free the output pages, if any */
2247 sep_free_dma_table_data_handler(sep);
2248
2249 return error;
2250 }
2251
2252 /**
2253 * sep_get_static_pool_addr_handler - get static pool address
2254 * @sep: pointer to struct sep_device
2255 *
2256 * This function sets the bus and virtual addresses of the static pool
2257 */
2258 static int sep_get_static_pool_addr_handler(struct sep_device *sep)
2259 {
2260 u32 *static_pool_addr = NULL;
2261
2262 static_pool_addr = (u32 *)(sep->shared_addr +
2263 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2264
2265 static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
2266 static_pool_addr[1] = (u32)sep->shared_bus +
2267 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2268
2269 dev_dbg(&sep->pdev->dev, "static pool segment: physical %x\n",
2270 (u32)static_pool_addr[1]);
2271
2272 return 0;
2273 }
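
/*
 * Editor's sketch, illustration only: the two-word message written into
 * the system area above. The struct below is the editor's shorthand for
 * that layout, not a driver type.
 */
struct sep_static_pool_msg {
	u32 token;	/* SEP_STATIC_POOL_VAL_TOKEN */
	u32 bus_addr;	/* 32-bit bus address of the static area */
};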
2274
2275 /**
2276 * sep_end_transaction_handler - end transaction
2277 * @sep: pointer to struct sep_device
2278 *
2279 * This API handles the end transaction request
2280 */
2281 static int sep_end_transaction_handler(struct sep_device *sep)
2282 {
2283 /* Clear the data pool pointers Token */
2284 memset((void *)(sep->shared_addr +
2285 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
2286 0, sep->num_of_data_allocations*2*sizeof(u32));
2287
2288 /* Check that all the DMA resources were freed */
2289 sep_free_dma_table_data_handler(sep);
2290
2291 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
2292
2293 /*
2294 * We are now through with the transaction. Let's
2295 * allow other processes who have the device open
2296 * to perform transactions
2297 */
2298 mutex_lock(&sep->sep_mutex);
2299 sep->pid_doing_transaction = 0;
2300 mutex_unlock(&sep->sep_mutex);
2301 	/* Raise event for stuck contexts */
2302 wake_up(&sep->event);
2303
2304 return 0;
2305 }
2306
2307 /**
2308 * sep_prepare_dcb_handler - prepare a control block
2309 * @sep: pointer to struct sep_device
2310 * @arg: pointer to user parameters
2311 *
2312  * This function builds the DMA tables and the DCB from the
2313  * command arguments supplied by user space.
2314 */
2315 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
2316 {
2317 int error;
2318 /* Command arguments */
2319 struct build_dcb_struct command_args;
2320
2321 /* Get the command arguments */
2322 if (copy_from_user(&command_args, (void __user *)arg,
2323 sizeof(struct build_dcb_struct))) {
2324 error = -EFAULT;
2325 goto end_function;
2326 }
2327
2328 dev_dbg(&sep->pdev->dev, "prep dcb handler app_in_address is %08llx\n",
2329 command_args.app_in_address);
2330 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2331 command_args.app_out_address);
2332 dev_dbg(&sep->pdev->dev, "data_size is %x\n",
2333 command_args.data_in_size);
2334 dev_dbg(&sep->pdev->dev, "block_size is %x\n",
2335 command_args.block_size);
2336 dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
2337 command_args.tail_block_size);
2338
2339 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2340 (unsigned long)command_args.app_in_address,
2341 (unsigned long)command_args.app_out_address,
2342 command_args.data_in_size, command_args.block_size,
2343 command_args.tail_block_size, true, false);
2344
2345 end_function:
2346 return error;
2347
2348 }
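
/*
 * Editor's sketch of the user-space side of the handler above. This is
 * illustration only: the field names follow the driver's debug output
 * and sep_driver_api.h, while the device fd, buffer pointers and error
 * handling are assumptions.
 */
#if 0	/* user-space example, not driver code */
	struct build_dcb_struct args = {
		.app_in_address = (aligned_u64)(unsigned long)in_buf,
		.app_out_address = (aligned_u64)(unsigned long)out_buf,
		.data_in_size = len,
		.block_size = 16,		/* e.g. AES block size */
		.tail_block_size = 16,
	};

	if (ioctl(fd, SEP_IOCPREPAREDCB, &args) < 0)
		perror("SEP_IOCPREPAREDCB");
#endif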
2349
2350 /**
2351 * sep_free_dcb_handler - free control block resources
2352 * @sep: pointer to struct sep_device
2353 *
2354 * This function frees the DCB resources and updates the needed
2355 * user-space buffers.
2356 */
2357 static int sep_free_dcb_handler(struct sep_device *sep)
2358 {
2359 return sep_free_dma_tables_and_dcb(sep, false, false);
2360 }
2361
2362 /**
2363 * sep_rar_prepare_output_msg_handler - prepare an output message
2364 * @sep: pointer to struct sep_device
2365 * @arg: pointer to user parameters
2366 *
2367 * This function will retrieve the RAR buffer physical addresses, type
2368 * & size corresponding to the RAR handles provided in the buffers vector.
2369 */
2370 static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2371 unsigned long arg)
2372 {
2373 int error = 0;
2374 /* Command args */
2375 struct rar_hndl_to_bus_struct command_args;
2376 /* Bus address */
2377 dma_addr_t rar_bus = 0;
2378 /* Holds the RAR address in the system memory offset */
2379 u32 *rar_addr;
2380
2381 /* Copy the data */
2382 if (copy_from_user(&command_args, (void __user *)arg,
2383 sizeof(command_args))) {
2384 error = -EFAULT;
2385 goto end_function;
2386 }
2387
2388 	/* RAR handle translation is not supported; reject non-NULL handles */
2389 if (command_args.rar_handle)
2390 return -EOPNOTSUPP;
2391 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
2392
2393 /* Set value in the SYSTEM MEMORY offset */
2394 rar_addr = (u32 *)(sep->shared_addr +
2395 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2396
2397 /* Copy the physical address to the System Area for the SEP */
2398 rar_addr[0] = SEP_RAR_VAL_TOKEN;
2399 rar_addr[1] = rar_bus;
2400
2401 end_function:
2402 return error;
2403 }
2404
2405 /**
2406 * sep_ioctl - ioctl api
2407 * @filp: pointer to struct file
2408 * @cmd: command
2409 * @arg: pointer to argument structure
2410 *
2411 * Implement the ioctl methods available on the SEP device.
2412 */
2413 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2414 {
2415 int error = 0;
2416 struct sep_device *sep = filp->private_data;
2417
2418 /* Make sure we own this device */
2419 mutex_lock(&sep->sep_mutex);
2420 if ((current->pid != sep->pid_doing_transaction) &&
2421 (sep->pid_doing_transaction != 0)) {
2422 dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
2423 		mutex_unlock(&sep->sep_mutex);
2424 		return -EACCES;
2425 }
2426
2427 mutex_unlock(&sep->sep_mutex);
2428
2429 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2430 return -ENOTTY;
2431
2432 	/* Lock to prevent the daemon from interfering with the operation */
2433 mutex_lock(&sep->ioctl_mutex);
2434
2435 switch (cmd) {
2436 case SEP_IOCSENDSEPCOMMAND:
2437 /* Send command to SEP */
2438 error = sep_send_command_handler(sep);
2439 break;
2440 case SEP_IOCALLOCDATAPOLL:
2441 /* Allocate data pool */
2442 error = sep_allocate_data_pool_memory_handler(sep, arg);
2443 break;
2444 case SEP_IOCGETSTATICPOOLADDR:
2445 /* Inform the SEP the bus address of the static pool */
2446 error = sep_get_static_pool_addr_handler(sep);
2447 break;
2448 case SEP_IOCENDTRANSACTION:
2449 error = sep_end_transaction_handler(sep);
2450 break;
2451 case SEP_IOCRARPREPAREMESSAGE:
2452 error = sep_rar_prepare_output_msg_handler(sep, arg);
2453 break;
2454 case SEP_IOCPREPAREDCB:
2455 error = sep_prepare_dcb_handler(sep, arg);
2456 break;
2457 case SEP_IOCFREEDCB:
2458 error = sep_free_dcb_handler(sep);
2459 break;
2460 default:
2461 error = -ENOTTY;
2462 break;
2463 }
2464
2466 mutex_unlock(&sep->ioctl_mutex);
2467 return error;
2468 }
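
/*
 * Editor's sketch of one full transaction as the ioctl set above
 * implies it. Illustration only: the ordering is inferred from the
 * handlers, and the device node name and timeout are assumptions.
 */
#if 0	/* user-space example, not driver code */
	int fd = open("/dev/sep", O_RDWR);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* mmap the shared area and write the request message, then: */
	ioctl(fd, SEP_IOCPREPAREDCB, &args);	/* args as in earlier sketch */
	ioctl(fd, SEP_IOCSENDSEPCOMMAND);	/* start the SEP */
	poll(&pfd, 1, 5000);			/* wait for the reply */
	ioctl(fd, SEP_IOCFREEDCB);		/* release DMA resources */
	ioctl(fd, SEP_IOCENDTRANSACTION);	/* give up device ownership */
#endif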
2469
2470 /**
2471 * sep_singleton_ioctl - ioctl api for singleton interface
2472 * @filp: pointer to struct file
2473 * @cmd: command
2474 * @arg: pointer to argument structure
2475 *
2476 * Implement the additional ioctls for the singleton device
2477 */
2478 static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
2479 {
2480 long error = 0;
2481 struct sep_device *sep = filp->private_data;
2482
2483 /* Check that the command is for the SEP device */
2484 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2485 return -ENOTTY;
2486
2487 /* Make sure we own this device */
2488 mutex_lock(&sep->sep_mutex);
2489 if ((current->pid != sep->pid_doing_transaction) &&
2490 (sep->pid_doing_transaction != 0)) {
2491 dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
2492 mutex_unlock(&sep->sep_mutex);
2493 return -EACCES;
2494 }
2495
2496 mutex_unlock(&sep->sep_mutex);
2497
2498 switch (cmd) {
2499 case SEP_IOCTLSETCALLERID:
2500 mutex_lock(&sep->ioctl_mutex);
2501 error = sep_set_caller_id_handler(sep, arg);
2502 mutex_unlock(&sep->ioctl_mutex);
2503 break;
2504 default:
2505 error = sep_ioctl(filp, cmd, arg);
2506 break;
2507 }
2508 return error;
2509 }
2510
2511 /**
2512 * sep_request_daemon_ioctl - ioctl for daemon
2513 * @filp: pointer to struct file
2514 * @cmd: command
2515 * @arg: pointer to argument structure
2516 *
2517 * Called by the request daemon to perform ioctls on the daemon device
2518 */
2519 static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
2520 unsigned long arg)
2521 {
2522
2523 long error;
2524 struct sep_device *sep = filp->private_data;
2525
2526 /* Check that the command is for SEP device */
2527 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2528 return -ENOTTY;
2529
2530 /* Only one process can access ioctl at any given time */
2531 mutex_lock(&sep->ioctl_mutex);
2532
2533 switch (cmd) {
2534 case SEP_IOCSENDSEPRPLYCOMMAND:
2535 /* Send reply command to SEP */
2536 error = sep_req_daemon_send_reply_command_handler(sep);
2537 break;
2538 case SEP_IOCENDTRANSACTION:
2539 /*
2540 * End req daemon transaction, do nothing
2541 * will be removed upon update in middleware
2542 * API library
2543 */
2544 error = 0;
2545 break;
2546 default:
2547 error = -ENOTTY;
2548 }
2549 mutex_unlock(&sep->ioctl_mutex);
2550 return error;
2551 }
2552
2553 /**
2554 * sep_inthandler - interrupt handler
2555 * @irq: interrupt
2556 * @dev_id: device id
2557 */
2558 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2559 {
2560 irqreturn_t int_error = IRQ_HANDLED;
2561 unsigned long lck_flags;
2562 u32 reg_val, reg_val2 = 0;
2563 struct sep_device *sep = dev_id;
2564
2565 /* Read the IRR register to check if this is SEP interrupt */
2566 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2567
2568 if (reg_val & (0x1 << 13)) {
2569 /* Lock and update the counter of reply messages */
2570 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
2571 sep->reply_ct++;
2572 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
2573
2574 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
2575 sep->send_ct, sep->reply_ct);
2576
2577 		/* Is this a printf or daemon request? */
2578 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2579 dev_dbg(&sep->pdev->dev,
2580 "SEP Interrupt - reg2 is %08x\n", reg_val2);
2581
2582 if ((reg_val2 >> 30) & 0x1) {
2583 dev_dbg(&sep->pdev->dev, "int: printf request\n");
2584 wake_up(&sep->event_request_daemon);
2585 } else if (reg_val2 >> 31) {
2586 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
2587 wake_up(&sep->event_request_daemon);
2588 } else {
2589 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
2590 wake_up(&sep->event);
2591 }
2592 } else {
2593 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
2594 int_error = IRQ_NONE;
2595 }
2596 if (int_error == IRQ_HANDLED)
2597 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2598
2599 return int_error;
2600 }
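
/*
 * Editor's sketch, illustration only: the GPR2 decode performed by the
 * handler above. Bit 30 flags a SEP printf request, bit 31 a daemon
 * request, and anything else is a reply to the host; bit 13 of IRR is
 * the SEP interrupt source itself. The enum and helper names are the
 * editor's, not driver API.
 */
enum sep_gpr2_event { SEP_EVT_PRINTF, SEP_EVT_DAEMON, SEP_EVT_REPLY };

static inline enum sep_gpr2_event sep_decode_gpr2(u32 reg_val2)
{
	if ((reg_val2 >> 30) & 0x1)
		return SEP_EVT_PRINTF;
	if (reg_val2 >> 31)
		return SEP_EVT_DAEMON;
	return SEP_EVT_REPLY;
}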
2601
2602 /**
2603 * sep_reconfig_shared_area - reconfigure shared area
2604 * @sep: pointer to struct sep_device
2605 *
2606 * Reconfig the shared area between HOST and SEP - needed in case
2607 * the DX_CC_Init function was called before OS loading.
2608 */
2609 static int sep_reconfig_shared_area(struct sep_device *sep)
2610 {
2611 int ret_val;
2612
2613 	/* used to limit waiting for the SEP */
2614 unsigned long end_time;
2615
2616 /* Send the new SHARED MESSAGE AREA to the SEP */
2617 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
2618 (unsigned long long)sep->shared_bus);
2619
2620 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2621
2622 /* Poll for SEP response */
2623 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2624
2625 end_time = jiffies + (WAIT_TIME * HZ);
2626
2627 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
2628 (ret_val != sep->shared_bus))
2629 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2630
2631 /* Check the return value (register) */
2632 if (ret_val != sep->shared_bus) {
2633 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
2634 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
2635 ret_val = -ENOMEM;
2636 } else
2637 ret_val = 0;
2638
2639 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
2640 return ret_val;
2641 }
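
/*
 * Editor's sketch, illustration only: the bounded-polling idiom used
 * above, spelled out as a generic helper. It re-reads a register until
 * the expected value appears, the bus returns all-ones, or WAIT_TIME
 * seconds of jiffies elapse. The helper name and the register offset
 * type are the editor's assumptions, not driver API.
 */
static u32 sep_poll_reg(struct sep_device *sep, unsigned long reg,
			u32 expected)
{
	unsigned long end_time = jiffies + (WAIT_TIME * HZ);
	u32 val = sep_read_reg(sep, reg);

	while (time_before(jiffies, end_time) && val != expected &&
		val != 0xffffffff)
		val = sep_read_reg(sep, reg);

	return val;
}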
2642
2643 /* File operation for singleton SEP operations */
2644 static const struct file_operations singleton_file_operations = {
2645 .owner = THIS_MODULE,
2646 .unlocked_ioctl = sep_singleton_ioctl,
2647 .poll = sep_poll,
2648 .open = sep_singleton_open,
2649 .release = sep_singleton_release,
2650 .mmap = sep_mmap,
2651 };
2652
2653 /* File operation for daemon operations */
2654 static const struct file_operations daemon_file_operations = {
2655 .owner = THIS_MODULE,
2656 .unlocked_ioctl = sep_request_daemon_ioctl,
2657 .poll = sep_request_daemon_poll,
2658 .open = sep_request_daemon_open,
2659 .release = sep_request_daemon_release,
2660 .mmap = sep_request_daemon_mmap,
2661 };
2662
2663 /* The files operations structure of the driver */
2664 static const struct file_operations sep_file_operations = {
2665 .owner = THIS_MODULE,
2666 .unlocked_ioctl = sep_ioctl,
2667 .poll = sep_poll,
2668 .open = sep_open,
2669 .release = sep_release,
2670 .mmap = sep_mmap,
2671 };
2672
2673 /**
2674 * sep_register_driver_with_fs - register misc devices
2675 * @sep: pointer to struct sep_device
2676 *
2677 * This function registers the driver with the file system
2678 */
2679 static int sep_register_driver_with_fs(struct sep_device *sep)
2680 {
2681 int ret_val;
2682
2683 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
2684 sep->miscdev_sep.name = SEP_DEV_NAME;
2685 sep->miscdev_sep.fops = &sep_file_operations;
2686
2687 sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
2688 sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
2689 sep->miscdev_singleton.fops = &singleton_file_operations;
2690
2691 sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
2692 sep->miscdev_daemon.name = SEP_DEV_DAEMON;
2693 sep->miscdev_daemon.fops = &daemon_file_operations;
2694
2695 ret_val = misc_register(&sep->miscdev_sep);
2696 if (ret_val) {
2697 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
2698 ret_val);
2699 return ret_val;
2700 }
2701
2702 ret_val = misc_register(&sep->miscdev_singleton);
2703 if (ret_val) {
2704 dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
2705 ret_val);
2706 misc_deregister(&sep->miscdev_sep);
2707 return ret_val;
2708 }
2709
2710 ret_val = misc_register(&sep->miscdev_daemon);
2711 if (ret_val) {
2712 dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
2713 ret_val);
2714 misc_deregister(&sep->miscdev_sep);
2715 misc_deregister(&sep->miscdev_singleton);
2716
2717 return ret_val;
2718 }
2719 return ret_val;
2720 }
2721 
2723 /**
2724 * sep_probe - probe a matching PCI device
2725 * @pdev: pci_device
2726  * @ent: pci_device_id
2727 *
2728 * Attempt to set up and configure a SEP device that has been
2729 * discovered by the PCI layer.
2730 */
2731 static int __devinit sep_probe(struct pci_dev *pdev,
2732 const struct pci_device_id *ent)
2733 {
2734 int error = 0;
2735 struct sep_device *sep;
2736
2737 if (sep_dev != NULL) {
2738 dev_warn(&pdev->dev, "only one SEP supported.\n");
2739 return -EBUSY;
2740 }
2741
2742 /* Enable the device */
2743 error = pci_enable_device(pdev);
2744 if (error) {
2745 dev_warn(&pdev->dev, "error enabling pci device\n");
2746 goto end_function;
2747 }
2748
2749 /* Allocate the sep_device structure for this device */
2750 	sep_dev = kzalloc(sizeof(struct sep_device), GFP_KERNEL);
2751 	if (sep_dev == NULL) {
2752 		dev_warn(&pdev->dev,
2753 			"can't kzalloc the sep_device structure\n");
2754 error = -ENOMEM;
2755 goto end_function_disable_device;
2756 }
2757
2758 /*
2759 * We're going to use another variable for actually
2760 * working with the device; this way, if we have
2761 * multiple devices in the future, it would be easier
2762 * to make appropriate changes
2763 */
2764 sep = sep_dev;
2765
2766 sep->pdev = pci_dev_get(pdev);
2767
2768 init_waitqueue_head(&sep->event);
2769 init_waitqueue_head(&sep->event_request_daemon);
2770 spin_lock_init(&sep->snd_rply_lck);
2771 mutex_init(&sep->sep_mutex);
2772 mutex_init(&sep->ioctl_mutex);
2773
2774 dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, device being prepared\n");
2775 dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
2776
2777 /* Set up our register area */
2778 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
2779 if (!sep->reg_physical_addr) {
2780 dev_warn(&sep->pdev->dev, "Error getting register start\n");
2781 error = -ENODEV;
2782 goto end_function_free_sep_dev;
2783 }
2784
2785 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
2786 if (!sep->reg_physical_end) {
2787 dev_warn(&sep->pdev->dev, "Error getting register end\n");
2788 error = -ENODEV;
2789 goto end_function_free_sep_dev;
2790 }
2791
2792 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
2793 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
2794 if (!sep->reg_addr) {
2795 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
2796 error = -ENODEV;
2797 goto end_function_free_sep_dev;
2798 }
2799
2800 dev_dbg(&sep->pdev->dev,
2801 "Register area start %llx end %llx virtual %p\n",
2802 (unsigned long long)sep->reg_physical_addr,
2803 (unsigned long long)sep->reg_physical_end,
2804 sep->reg_addr);
2805
2806 /* Allocate the shared area */
2807 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2808 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
2809 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
2810 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
2811 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2812
2813 if (sep_map_and_alloc_shared_area(sep)) {
2814 error = -ENOMEM;
2815 /* Allocation failed */
2816 goto end_function_error;
2817 }
2818
2819 /* Clear ICR register */
2820 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2821
2822 /* Set the IMR register - open only GPR 2 */
2823 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2824
2825 /* Read send/receive counters from SEP */
2826 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2827 sep->reply_ct &= 0x3FFFFFFF;
2828 sep->send_ct = sep->reply_ct;
2829
2830 /* Get the interrupt line */
2831 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
2832 "sep_driver", sep);
2833
2834 if (error)
2835 goto end_function_deallocate_sep_shared_area;
2836
2837 /* The new chip requires a shared area reconfigure */
2838 if (sep->pdev->revision == 4) { /* Only for new chip */
2839 error = sep_reconfig_shared_area(sep);
2840 if (error)
2841 goto end_function_free_irq;
2842 }
2843 	/* Finally create the device nodes by registering with the fs */
2845 error = sep_register_driver_with_fs(sep);
2846 if (error == 0)
2847 /* Success */
2848 return 0;
2849
2850 end_function_free_irq:
2851 free_irq(pdev->irq, sep);
2852
2853 end_function_deallocate_sep_shared_area:
2854 /* De-allocate shared area */
2855 sep_unmap_and_free_shared_area(sep);
2856
2857 end_function_error:
2858 iounmap(sep->reg_addr);
2859
2860 end_function_free_sep_dev:
2861 pci_dev_put(sep_dev->pdev);
2862 kfree(sep_dev);
2863 sep_dev = NULL;
2864
2865 end_function_disable_device:
2866 pci_disable_device(pdev);
2867
2868 end_function:
2869 return error;
2870 }
2871
2872 static void sep_remove(struct pci_dev *pdev)
2873 {
2874 struct sep_device *sep = sep_dev;
2875
2876 /* Unregister from fs */
2877 misc_deregister(&sep->miscdev_sep);
2878 misc_deregister(&sep->miscdev_singleton);
2879 misc_deregister(&sep->miscdev_daemon);
2880
2881 /* Free the irq */
2882 free_irq(sep->pdev->irq, sep);
2883
2884 /* Free the shared area */
2885 sep_unmap_and_free_shared_area(sep_dev);
2886 	iounmap(sep_dev->reg_addr);
2887 }
2888
2889 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
2890 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
2891 {0}
2892 };
2893
2894 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2895
2896 /* Field for registering driver to PCI device */
2897 static struct pci_driver sep_pci_driver = {
2898 .name = "sep_sec_driver",
2899 .id_table = sep_pci_id_tbl,
2900 .probe = sep_probe,
2901 .remove = sep_remove
2902 };
2903 
2905 /**
2906 * sep_init - init function
2907 *
2908 * Module load time. Register the PCI device driver.
2909 */
2910 static int __init sep_init(void)
2911 {
2912 return pci_register_driver(&sep_pci_driver);
2913 }
2914 
2916 /**
2917 * sep_exit - called to unload driver
2918 *
2919  * Module unload time. Unregister the PCI driver; the remove
2920  * method releases the per-device resources.
2921 */
2922 static void __exit sep_exit(void)
2923 {
2924 pci_unregister_driver(&sep_pci_driver);
2925 }
2926 
2928 module_init(sep_init);
2929 module_exit(sep_exit);
2930
2931 MODULE_LICENSE("GPL");