Staging: sep: tidy firmware load
[deliverable/linux.git] / drivers / staging / sep / sep_driver.c
1 /*
2 *
3 * sep_driver.c - Security Processor Driver main group of functions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/fs.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
38 #include <linux/mm.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <linux/pci.h>
42 #include <linux/firmware.h>
43 #include <asm/ioctl.h>
44 #include <linux/ioport.h>
45 #include <asm/io.h>
46 #include <linux/interrupt.h>
47 #include <linux/pagemap.h>
48 #include <asm/cacheflush.h>
49 #include "sep_driver_hw_defs.h"
50 #include "sep_driver_config.h"
51 #include "sep_driver_api.h"
52 #include "sep_dev.h"
53
#if SEP_DRIVER_ARM_DEBUG_MODE

#define CRYS_SEP_ROM_length                  0x4000
#define CRYS_SEP_ROM_start_address           0x8000C000UL
#define CRYS_SEP_ROM_start_address_offset    0xC000UL
#define SEP_ROM_BANK_register                0x80008420UL
#define SEP_ROM_BANK_register_offset         0x8420UL

/*
 * THIS definition is specific to the board - must be
 * defined during integration. (A second, conflicting definition of
 * 0x82000000 was removed: redefining a macro with a different value
 * is a constraint violation.)
 */
#define SEP_RAR_IO_MEM_REGION_START_ADDRESS  0xFF0D0000

/* 2M size */

/**
 *	sep_load_rom_code - load the SEP ROM image into the device
 *	@sep: SEP device being initialised
 *
 *	Writes the CRYS_SEP_ROM image into the SEP ROM area bank by bank
 *	(4 banks of CRYS_SEP_ROM_length/4 words), resets the SEP and then
 *	busy-waits for the ROM boot to post a status in GPR3.  ARM debug
 *	builds only.
 */
static void sep_load_rom_code(struct sep_device *sep)
{
	/* Index variables */
	unsigned long i, k, j;
	u32 reg;
	u32 error;
	u32 warning;

	/* Loading ROM from SEP_ROM_image.h file */
	k = sizeof(CRYS_SEP_ROM);

	edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");

	edbg("SEP Driver: k is %lu\n", k);
	edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
	edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);

	for (i = 0; i < 4; i++) {
		/* write bank */
		sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);

		for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
			sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);

			k = k - 4;

			/* the image may end before the banks are full;
			   force both loops to terminate */
			if (k == 0) {
				j = CRYS_SEP_ROM_length;
				i = 4;
			}
		}
	}

	/* reset the SEP */
	sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);

	/* poll for SEP ROM boot finish */
	do
		reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	while (!reg);

	edbg("SEP Driver: ROM polling ended\n");

	switch (reg) {
	case 0x1:
		/* fatal error - read error status from GPR0 */
		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		edbg("SEP Driver: ROM polling case 1\n");
		break;
	case 0x4:
		/* Cold boot ended successfully  */
	case 0x8:
		/* Warmboot ended successfully */
	case 0x10:
		/* ColdWarm boot ended successfully */
		error = 0;
		/* fall through */
	case 0x2:
		/* Boot First Phase ended  */
		warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		/* fall through */
	case 0x20:
		edbg("SEP Driver: ROM polling case %d\n", reg);
		break;
	}

}

#else
static void sep_load_rom_code(struct sep_device *sep) { }
#endif				/* SEP_DRIVER_ARM_DEBUG_MODE */
140
141
142
/*----------------------------------------
	DEFINES
-----------------------------------------*/

/* base physical address used for mapping system memory (board specific) */
#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
/* size of the RAR I/O memory region reserved for the SEP */
#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000

/*--------------------------------------------
	GLOBAL variables
--------------------------------------------*/

/* debug messages level */
static int debug;
module_param(debug, int , 0);
MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");

/* Keep this a single static object for now to keep the conversion easy */
/* the one and only device instance - the driver supports a single SEP */
static struct sep_device sep_instance;
static struct sep_device *sep_dev = &sep_instance;

/*
  mutex for the access to the internals of the sep driver
  (also enforces single-open: taken in sep_open, dropped in sep_release)
*/
static DEFINE_MUTEX(sep_mutex);


/* wait queue head (event) of the driver */
static DECLARE_WAIT_QUEUE_HEAD(sep_event);
172
173 /**
174 * sep_load_firmware - copy firmware cache/resident
175 * @sep: device we are loading
176 *
177 * This functions copies the cache and resident from their source
178 * location into destination shared memory.
179 */
180
181 static int sep_load_firmware(struct sep_device *sep)
182 {
183 const struct firmware *fw;
184 char *cache_name = "cache.image.bin";
185 char *res_name = "resident.image.bin";
186 int error;
187
188 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
189 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
190
191 sep->rar_region_addr = sep->rar_addr;
192 sep->cache_bus = sep->rar_bus;
193 sep->cache_addr = sep->rar_addr;
194
195 /* load cache */
196 error = request_firmware(&fw, cache_name, &sep->pdev->dev);
197 if (error) {
198 edbg("SEP Driver:cant request cache fw\n");
199 return error;
200 }
201 edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
202
203 memcpy(sep->cache_addr, (void *)fw->data, fw->size);
204 sep->cache_size = fw->size;
205 release_firmware(fw);
206
207 sep->resident_bus = sep->cache_bus + sep->cache_size;
208 sep->resident_addr = sep->cache_addr + sep->cache_size;
209
210 /* load resident */
211 error = request_firmware(&fw, res_name, &sep->pdev->dev);
212 if (error) {
213 edbg("SEP Driver:cant request res fw\n");
214 return error;
215 }
216 edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
217
218 memcpy(sep->resident_addr, (void *) fw->data, fw->size);
219 sep->resident_size = fw->size;
220 release_firmware(fw);
221
222 edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
223 sep->resident_addr, (unsigned long long)sep->resident_bus,
224 sep->cache_addr, (unsigned long long)sep->cache_bus);
225 return 0;
226 }
227
228 /**
229 * sep_map_and_alloc_shared_area - allocate shared block
230 * @sep: security processor
231 * @size: size of shared area
232 *
233 * Allocate a shared buffer in host memory that can be used by both the
234 * kernel and also the hardware interface via DMA.
235 */
236
237 static int sep_map_and_alloc_shared_area(struct sep_device *sep,
238 unsigned long size)
239 {
240 /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
241 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
242 &sep->shared_bus, GFP_KERNEL);
243
244 if (!sep->shared_addr) {
245 edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
246 return -ENOMEM;
247 }
248 sep->shared_area = sep->shared_addr;
249 /* set the bus address of the shared area */
250 sep->shared_area_bus = sep->shared_bus;
251 edbg("sep: shared_area %ld bytes @%p (bus %08llx)\n",
252 size, sep->shared_addr, (unsigned long long)sep->shared_bus);
253 return 0;
254 }
255
/**
 *	sep_unmap_and_free_shared_area - free shared block
 *	@sep: security processor
 *	@size: size of the area originally allocated
 *
 *	Free the shared area allocated to the security processor. The
 *	processor must have finished with this and any final posted
 *	writes cleared before we do so.
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
{
	/* releases the coherent buffer from sep_map_and_alloc_shared_area */
	dma_free_coherent(&sep->pdev->dev, size,
			  sep->shared_area, sep->shared_area_bus);
}
269
270 /**
271 * sep_shared_area_virt_to_bus - convert bus/virt addresses
272 *
273 * Returns the bus address inside the shared area according
274 * to the virtual address.
275 */
276
277 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
278 void *virt_address)
279 {
280 dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
281 edbg("sep: virt to bus b %08llx v %p\n", pa, virt_address);
282 return pa;
283 }
284
285 /**
286 * sep_shared_area_bus_to_virt - convert bus/virt addresses
287 *
288 * Returns virtual address inside the shared area according
289 * to the bus address.
290 */
291
292 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
293 dma_addr_t bus_address)
294 {
295 return sep->shared_addr + (bus_address - sep->shared_bus);
296 }
297
298
299 /*----------------------------------------------------------------------
300 open function of the character driver - must only lock the mutex
301 must also release the memory data pool allocations
302 ------------------------------------------------------------------------*/
303 static int sep_open(struct inode *inode, struct file *filp)
304 {
305 int error = 0;
306
307 dbg("SEP Driver:--------> open start\n");
308
309 /* check the blocking mode */
310 if (filp->f_flags & O_NDELAY)
311 error = mutex_trylock(&sep_mutex);
312 else
313 /* lock mutex */
314 mutex_lock(&sep_mutex);
315
316 /* check the error */
317 if (error) {
318 edbg("SEP Driver: down_interruptible failed\n");
319 goto end_function;
320 }
321 /* Bind to the device, we only have one which makes it easy */
322 filp->private_data = sep_dev;
323 if (sep_dev == NULL)
324 return -ENODEV;
325
326 /* release data pool allocations */
327 sep_dev->data_pool_bytes_allocated = 0;
328
329
330 end_function:
331 dbg("SEP Driver:<-------- open end\n");
332 return error;
333 }
334
335
336
337
/*------------------------------------------------------------
	release function - drops the single-open lock taken in sep_open().
	The IRQ/IMR teardown below is compiled out (polling-mode build).
-------------------------------------------------------------*/
static int sep_release(struct inode *inode_ptr, struct file *filp)
{
	/* NOTE(review): declared struct sep_driver while every other
	   handler uses struct sep_device; only harmless because it is
	   unused outside the #if 0 block - confirm and unify the type */
	struct sep_driver *sep = filp->private_data;
	dbg("----------->SEP Driver: sep_release start\n");

#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* close IMR */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, sep);

#endif
	/* unlock the sep mutex */
	mutex_unlock(&sep_mutex);
	dbg("SEP Driver:<-------- sep_release end\n");
	return 0;
}
358
359
360
361
362 /*---------------------------------------------------------------
363 map function - this functions maps the message shared area
364 -----------------------------------------------------------------*/
365 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
366 {
367 dma_addr_t bus_addr;
368 struct sep_device *sep = filp->private_data;
369
370 dbg("-------->SEP Driver: mmap start\n");
371
372 /* check that the size of the mapped range is as the size of the message
373 shared area */
374 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
375 edbg("SEP Driver mmap requested size is more than allowed\n");
376 printk(KERN_WARNING "SEP Driver mmap requested size is more \
377 than allowed\n");
378 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
379 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
380 return -EAGAIN;
381 }
382
383 edbg("SEP Driver:sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);
384
385 /* get bus address */
386 bus_addr = sep->shared_area_bus;
387
388 edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
389
390 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
391 edbg("SEP Driver remap_page_range failed\n");
392 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
393 return -EAGAIN;
394 }
395
396 dbg("SEP Driver:<-------- mmap end\n");
397
398 return 0;
399 }
400
401
/*-----------------------------------------------
	poll function
	Reports POLLOUT|POLLWRNORM when the SEP has posted a request of
	its own, POLLIN|POLLRDNORM when a reply to our last command is
	ready; the top bit of GPR2 distinguishes the two.
*----------------------------------------------*/
static unsigned int sep_poll(struct file *filp, poll_table * wait)
{
	unsigned long count;
	unsigned int mask = 0;
	unsigned long retval = 0;	/* flow id */
	struct sep_device *sep = filp->private_data;

	dbg("---------->SEP Driver poll: start\n");


#if SEP_DRIVER_POLLING_MODE

	/* busy-wait until the low 31 bits of GPR2 match the number of
	   commands we have sent, i.e. the SEP has consumed the last one */
	while (sep->send_ct != (retval & 0x7FFFFFFF)) {
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
	}

	sep->reply_ct++;
#else
	/* add the event to the polling wait table */
	poll_wait(filp, &sep_event, wait);

#endif

	edbg("sep->send_ct is %lu\n", sep->send_ct);
	edbg("sep->reply_ct is %lu\n", sep->reply_ct);

	/* check if the data is ready */
	/* counters equal means every command we sent has been answered */
	if (sep->send_ct == sep->reply_ct) {
		for (count = 0; count < 12 * 4; count += 4)
			edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + 0x1800 + count)));

		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		edbg("retval is %lu\n", retval);
		/* check if the this is sep reply or request */
		if (retval >> 31) {
			/* top bit set: SEP-originated request */
			edbg("SEP Driver: sep request in\n");
			/* request */
			mask |= POLLOUT | POLLWRNORM;
		} else {
			/* top bit clear: reply to the host */
			edbg("SEP Driver: sep reply in\n");
			mask |= POLLIN | POLLRDNORM;
		}
	}
	dbg("SEP Driver:<-------- poll exit\n");
	return mask;
}
457
/*
  calculates time and sets it at the predefined address
  Writes a { SEP_TIME_VAL_TOKEN, seconds } pair into the shared message
  area so the SEP can read the host wall-clock time; optionally reports
  the bus address of that pair and the seconds value to the caller.
*/
static int sep_set_time(struct sep_device *sep, unsigned long *address_ptr, unsigned long *time_in_sec_ptr)
{
	struct timeval time;
	/* address of time in the kernel */
	u32 *time_addr;


	dbg("SEP Driver:--------> sep_set_time start\n");

	do_gettimeofday(&time);

	/* set value in the SYSTEM MEMORY offset */
	/* NOTE(review): the offset constant is in bytes; whether this
	   pointer arithmetic scales it depends on the declared type of
	   message_shared_area_addr - verify against sep_dev.h */
	time_addr = sep->message_shared_area_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;

	/* token first so the SEP can validate the record, then seconds */
	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
	edbg("SEP Driver:time_addr is %p\n", time_addr);
	edbg("SEP Driver:sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);

	/* set the output parameters if needed */
	if (address_ptr)
		*address_ptr = sep_shared_area_virt_to_bus(sep, time_addr);

	if (time_in_sec_ptr)
		*time_in_sec_ptr = time.tv_sec;

	dbg("SEP Driver:<-------- sep_set_time end\n");

	return 0;
}
493
494 /*
495 This function raises interrupt to SEP that signals that is has a new
496 command from HOST
497 */
498 static void sep_send_command_handler(struct sep_device *sep)
499 {
500 unsigned long count;
501
502 dbg("SEP Driver:--------> sep_send_command_handler start\n");
503 sep_set_time(sep, 0, 0);
504
505 /* flash cache */
506 flush_cache_all();
507
508 for (count = 0; count < 12 * 4; count += 4)
509 edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));
510
511 /* update counter */
512 sep->send_ct++;
513 /* send interrupt to SEP */
514 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
515 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
516 return;
517 }
518
/*
  This function raises an interrupt to the SEP signalling that the host
  has placed a reply (to a SEP-originated command) in the shared area.
*/
static void sep_send_reply_command_handler(struct sep_device *sep)
{
	unsigned long count;

	dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");

	/* flash cache */
	flush_cache_all();
	/* dump the first 12 words of the outgoing message */
	for (count = 0; count < 12 * 4; count += 4)
		edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));
	/* update counter */
	sep->send_ct++;
	/* send the interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
	/* update both counters */
	/* NOTE(review): send_ct is incremented a second time here, so each
	   reply bumps it twice in total; looks suspicious but may be part
	   of the GPR2 handshake - confirm against the SEP protocol before
	   changing */
	sep->send_ct++;
	sep->reply_ct++;
	dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
}
542
543 /*
544 This function handles the allocate data pool memory request
545 This function returns calculates the bus address of the
546 allocated memory, and the offset of this area from the mapped address.
547 Therefore, the FVOs in user space can calculate the exact virtual
548 address of this allocated memory
549 */
550 static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
551 unsigned long arg)
552 {
553 int error;
554 struct sep_driver_alloc_t command_args;
555
556 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
557
558 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
559 if (error)
560 goto end_function;
561
562 /* allocate memory */
563 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
564 error = -ENOMEM;
565 goto end_function;
566 }
567
568 /* set the virtual and bus address */
569 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
570 command_args.phys_address = sep->shared_area_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
571
572 /* write the memory back to the user space */
573 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
574 if (error)
575 goto end_function;
576
577 /* set the allocation */
578 sep->data_pool_bytes_allocated += command_args.num_bytes;
579
580 end_function:
581 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
582 return error;
583 }
584
585 /*
586 This function handles write into allocated data pool command
587 */
588 static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
589 {
590 int error;
591 void *virt_address;
592 unsigned long va;
593 unsigned long app_in_address;
594 unsigned long num_bytes;
595 void *data_pool_area_addr;
596
597 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
598
599 /* get the application address */
600 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
601 if (error)
602 goto end_function;
603
604 /* get the virtual kernel address address */
605 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
606 if (error)
607 goto end_function;
608 virt_address = (void *)va;
609
610 /* get the number of bytes */
611 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
612 if (error)
613 goto end_function;
614
615 /* calculate the start of the data pool */
616 data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
617
618
619 /* check that the range of the virtual kernel address is correct */
620 if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
621 error = -EINVAL;
622 goto end_function;
623 }
624 /* copy the application data */
625 error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
626 end_function:
627 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
628 return error;
629 }
630
631 /*
632 this function handles the read from data pool command
633 */
634 static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
635 {
636 int error;
637 /* virtual address of dest application buffer */
638 unsigned long app_out_address;
639 /* virtual address of the data pool */
640 unsigned long va;
641 void *virt_address;
642 unsigned long num_bytes;
643 void *data_pool_area_addr;
644
645 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
646
647 /* get the application address */
648 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
649 if (error)
650 goto end_function;
651
652 /* get the virtual kernel address address */
653 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
654 if (error)
655 goto end_function;
656 virt_address = (void *)va;
657
658 /* get the number of bytes */
659 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
660 if (error)
661 goto end_function;
662
663 /* calculate the start of the data pool */
664 data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
665
666 /* FIXME: These are incomplete all over the driver: what about + len
667 and when doing that also overflows */
668 /* check that the range of the virtual kernel address is correct */
669 if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
670 error = -EINVAL;
671 goto end_function;
672 }
673
674 /* copy the application data */
675 error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
676 end_function:
677 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
678 return error;
679 }
680
/*
  This function releases all the application virtual buffer physical
  pages that were previously locked by sep_lock_user_pages(), then
  frees the page array itself.  When dirtyFlag is set the pages were
  written to (an "out" buffer) and are marked dirty so the data is not
  lost; pages that were only read are released unchanged.
*/
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
{
	unsigned long count;

	for (count = 0; count < num_pages; count++) {
		if (dirtyFlag) {
			/* the out array was written, therefore the data
			   was changed - preserve it */
			if (!PageReserved(page_array_ptr[count]))
				SetPageDirty(page_array_ptr[count]);
		}
		page_cache_release(page_array_ptr[count]);
	}

	/* free the array; kfree(NULL) is a no-op, so no guard is needed
	   (the old "if (page_array_ptr)" check was redundant anyway as
	   the loop above already dereferenced the pointer) */
	kfree(page_array_ptr);

	return 0;
}
709
710 /*
711 This function locks all the physical pages of the kernel virtual buffer
712 and construct a basic lli array, where each entry holds the physical
713 page address and the size that application data holds in this physical pages
714 */
715 static int sep_lock_kernel_pages(struct sep_device *sep,
716 unsigned long kernel_virt_addr,
717 unsigned long data_size,
718 unsigned long *num_pages_ptr,
719 struct sep_lli_entry_t **lli_array_ptr,
720 struct page ***page_array_ptr)
721 {
722 int error = 0;
723 /* the the page of the end address of the user space buffer */
724 unsigned long end_page;
725 /* the page of the start address of the user space buffer */
726 unsigned long start_page;
727 /* the range in pages */
728 unsigned long num_pages;
729 struct sep_lli_entry_t *lli_array;
730 /* next kernel address to map */
731 unsigned long next_kernel_address;
732 unsigned long count;
733
734 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
735
736 /* set start and end pages and num pages */
737 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
738 start_page = kernel_virt_addr >> PAGE_SHIFT;
739 num_pages = end_page - start_page + 1;
740
741 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
742 edbg("SEP Driver: data_size is %lu\n", data_size);
743 edbg("SEP Driver: start_page is %lx\n", start_page);
744 edbg("SEP Driver: end_page is %lx\n", end_page);
745 edbg("SEP Driver: num_pages is %lu\n", num_pages);
746
747 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
748 if (!lli_array) {
749 edbg("SEP Driver: kmalloc for lli_array failed\n");
750 error = -ENOMEM;
751 goto end_function;
752 }
753
754 /* set the start address of the first page - app data may start not at
755 the beginning of the page */
756 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
757
758 /* check that not all the data is in the first page only */
759 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
760 lli_array[0].block_size = data_size;
761 else
762 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
763
764 /* debug print */
765 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
766
767 /* advance the address to the start of the next page */
768 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
769
770 /* go from the second page to the prev before last */
771 for (count = 1; count < (num_pages - 1); count++) {
772 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
773 lli_array[count].block_size = PAGE_SIZE;
774
775 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
776 next_kernel_address += PAGE_SIZE;
777 }
778
779 /* if more then 1 pages locked - then update for the last page size needed */
780 if (num_pages > 1) {
781 /* update the address of the last page */
782 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
783
784 /* set the size of the last page */
785 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
786
787 if (lli_array[count].block_size == 0) {
788 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
789 dbg("data_size is %lu\n", data_size);
790 while (1);
791 }
792
793 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
794 }
795 /* set output params */
796 *lli_array_ptr = lli_array;
797 *num_pages_ptr = num_pages;
798 *page_array_ptr = 0;
799 end_function:
800 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
801 return 0;
802 }
803
804 /*
805 This function locks all the physical pages of the application virtual buffer
806 and construct a basic lli array, where each entry holds the physical page
807 address and the size that application data holds in this physical pages
808 */
809 static int sep_lock_user_pages(struct sep_device *sep,
810 unsigned long app_virt_addr,
811 unsigned long data_size,
812 unsigned long *num_pages_ptr,
813 struct sep_lli_entry_t **lli_array_ptr,
814 struct page ***page_array_ptr)
815 {
816 int error = 0;
817 /* the the page of the end address of the user space buffer */
818 unsigned long end_page;
819 /* the page of the start address of the user space buffer */
820 unsigned long start_page;
821 /* the range in pages */
822 unsigned long num_pages;
823 struct page **page_array;
824 struct sep_lli_entry_t *lli_array;
825 unsigned long count;
826 int result;
827
828 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
829
830 /* set start and end pages and num pages */
831 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
832 start_page = app_virt_addr >> PAGE_SHIFT;
833 num_pages = end_page - start_page + 1;
834
835 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
836 edbg("SEP Driver: data_size is %lu\n", data_size);
837 edbg("SEP Driver: start_page is %lu\n", start_page);
838 edbg("SEP Driver: end_page is %lu\n", end_page);
839 edbg("SEP Driver: num_pages is %lu\n", num_pages);
840
841 /* allocate array of pages structure pointers */
842 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
843 if (!page_array) {
844 edbg("SEP Driver: kmalloc for page_array failed\n");
845
846 error = -ENOMEM;
847 goto end_function;
848 }
849
850 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
851 if (!lli_array) {
852 edbg("SEP Driver: kmalloc for lli_array failed\n");
853
854 error = -ENOMEM;
855 goto end_function_with_error1;
856 }
857
858 /* convert the application virtual address into a set of physical */
859 down_read(&current->mm->mmap_sem);
860 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
861 up_read(&current->mm->mmap_sem);
862
863 /* check the number of pages locked - if not all then exit with error */
864 if (result != num_pages) {
865 dbg("SEP Driver: not all pages locked by get_user_pages\n");
866
867 error = -ENOMEM;
868 goto end_function_with_error2;
869 }
870
871 /* flush the cache */
872 for (count = 0; count < num_pages; count++)
873 flush_dcache_page(page_array[count]);
874
875 /* set the start address of the first page - app data may start not at
876 the beginning of the page */
877 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
878
879 /* check that not all the data is in the first page only */
880 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
881 lli_array[0].block_size = data_size;
882 else
883 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
884
885 /* debug print */
886 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
887
888 /* go from the second page to the prev before last */
889 for (count = 1; count < (num_pages - 1); count++) {
890 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
891 lli_array[count].block_size = PAGE_SIZE;
892
893 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
894 }
895
896 /* if more then 1 pages locked - then update for the last page size needed */
897 if (num_pages > 1) {
898 /* update the address of the last page */
899 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
900
901 /* set the size of the last page */
902 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
903
904 if (lli_array[count].block_size == 0) {
905 dbg("app_virt_addr is %08lx\n", app_virt_addr);
906 dbg("data_size is %lu\n", data_size);
907 while (1);
908 }
909 edbg("lli_array[%lu].physical_address is %08lx, \
910 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
911 }
912
913 /* set output params */
914 *lli_array_ptr = lli_array;
915 *num_pages_ptr = num_pages;
916 *page_array_ptr = page_array;
917 goto end_function;
918
919 end_function_with_error2:
920 /* release the cache */
921 for (count = 0; count < num_pages; count++)
922 page_cache_release(page_array[count]);
923 kfree(lli_array);
924 end_function_with_error1:
925 kfree(page_array);
926 end_function:
927 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
928 return 0;
929 }
930
931
932 /*
933 this function calculates the size of data that can be inserted into the lli
934 table from this array the condition is that either the table is full
935 (all etnries are entered), or there are no more entries in the lli array
936 */
937 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
938 {
939 unsigned long table_data_size = 0;
940 unsigned long counter;
941
942 /* calculate the data in the out lli table if till we fill the whole
943 table or till the data has ended */
944 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
945 table_data_size += lli_in_array_ptr[counter].block_size;
946 return table_data_size;
947 }
948
949 /*
950 this functions builds ont lli table from the lli_array according to
951 the given size of data
952 */
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
{
	/* amount of data copied into the table so far */
	unsigned long curr_table_data_size;
	/* counter of lli array entry */
	unsigned long array_counter;

	dbg("SEP Driver:--------> sep_build_lli_table start\n");

	/* init current table data size and lli array entry counter;
	   the entry count starts at 1 so that it already includes the
	   trailing info entry written after the loop */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

	/* fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		/* copy the next source entry into the table */
		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
		curr_table_data_size += lli_table_ptr->block_size;

		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* check for overflow of the table data - the last source
		   entry may only partially fit in this table */
		if (curr_table_data_size > table_data_size) {
			edbg("SEP Driver:curr_table_data_size > table_data_size\n");

			/* update the size of block in the table: trim the
			   table entry down to exactly what is still needed */
			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

			/* update the physical address in the lli array so the
			   next table starts at the unconsumed remainder of
			   this source entry */
			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

			/* update the block size left in the lli array; the
			   source entry is deliberately NOT advanced past, so
			   its remainder is picked up by the next table */
			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
		} else
			/* advance to the next entry in the lli_array */
			array_counter++;

		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* move to the next entry in table */
		lli_table_ptr++;
	}

	/* set the info entry to default; the caller later overwrites this
	   with a link to the next table in the chain, if any */
	lli_table_ptr->physical_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

	/* set the output parameter - how many source entries were fully
	   consumed (a partially consumed entry is not counted) */
	*num_processed_entries_ptr += array_counter;

	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
	return;
}
1019
1020 /*
1021 this function goes over the list of the print created tables and
1022 prints all the data
1023 */
static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
{
	unsigned long table_count;
	unsigned long entries_count;

	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");

	/* walk the chain of tables; a link value of 0xffffffff in the info
	   entry marks the end of the chain */
	table_count = 1;
	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
		}

		/* point to the info entry - the last one printed above */
		lli_table_ptr--;

		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);


		/* the info entry encodes the next table: low 24 bits of
		   block_size hold its data size, the top 8 bits its entry
		   count, and physical_address links to it */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry_t *)
		    (lli_table_ptr->physical_address);

		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);

		/* translate the bus address of the next table back to a
		   kernel virtual pointer before dereferencing it */
		if ((unsigned long) lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_area_bus_to_virt(sep, (unsigned long) lli_table_ptr);

		table_count++;
	}
	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
}
1063
1064
1065 /*
1066 This function prepares only input DMA table for synhronic symmetric
1067 operations (HASH)
1068 */
1069 static int sep_prepare_input_dma_table(struct sep_device *sep,
1070 unsigned long app_virt_addr,
1071 unsigned long data_size,
1072 unsigned long block_size,
1073 unsigned long *lli_table_ptr,
1074 unsigned long *num_entries_ptr,
1075 unsigned long *table_data_size_ptr,
1076 bool isKernelVirtualAddress)
1077 {
1078 /* pointer to the info entry of the table - the last entry */
1079 struct sep_lli_entry_t *info_entry_ptr;
1080 /* array of pointers ot page */
1081 struct sep_lli_entry_t *lli_array_ptr;
1082 /* points to the first entry to be processed in the lli_in_array */
1083 unsigned long current_entry;
1084 /* num entries in the virtual buffer */
1085 unsigned long sep_lli_entries;
1086 /* lli table pointer */
1087 struct sep_lli_entry_t *in_lli_table_ptr;
1088 /* the total data in one table */
1089 unsigned long table_data_size;
1090 /* number of entries in lli table */
1091 unsigned long num_entries_in_table;
1092 /* next table address */
1093 void *lli_table_alloc_addr;
1094 unsigned long result;
1095
1096 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1097
1098 edbg("SEP Driver:data_size is %lu\n", data_size);
1099 edbg("SEP Driver:block_size is %lu\n", block_size);
1100
1101 /* initialize the pages pointers */
1102 sep->in_page_array = 0;
1103 sep->in_num_pages = 0;
1104
1105 if (data_size == 0) {
1106 /* special case - created 2 entries table with zero data */
1107 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1108 /* FIXME: Should the entry below not be for _bus */
1109 in_lli_table_ptr->physical_address = (unsigned long)sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1110 in_lli_table_ptr->block_size = 0;
1111
1112 in_lli_table_ptr++;
1113 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1114 in_lli_table_ptr->block_size = 0;
1115
1116 *lli_table_ptr = sep->shared_area_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1117 *num_entries_ptr = 2;
1118 *table_data_size_ptr = 0;
1119
1120 goto end_function;
1121 }
1122
1123 /* check if the pages are in Kernel Virtual Address layout */
1124 if (isKernelVirtualAddress == true)
1125 /* lock the pages of the kernel buffer and translate them to pages */
1126 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1127 else
1128 /* lock the pages of the user buffer and translate them to pages */
1129 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1130
1131 if (result)
1132 return result;
1133
1134 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1135
1136 current_entry = 0;
1137 info_entry_ptr = 0;
1138 sep_lli_entries = sep->in_num_pages;
1139
1140 /* initiate to point after the message area */
1141 lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1142
1143 /* loop till all the entries in in array are not processed */
1144 while (current_entry < sep_lli_entries) {
1145 /* set the new input and output tables */
1146 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1147
1148 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1149
1150 /* calculate the maximum size of data for input table */
1151 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1152
1153 /* now calculate the table size so that it will be module block size */
1154 table_data_size = (table_data_size / block_size) * block_size;
1155
1156 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1157
1158 /* construct input lli table */
1159 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
1160
1161 if (info_entry_ptr == 0) {
1162 /* set the output parameters to physical addresses */
1163 *lli_table_ptr = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1164 *num_entries_ptr = num_entries_in_table;
1165 *table_data_size_ptr = table_data_size;
1166
1167 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1168 } else {
1169 /* update the info entry of the previous in table */
1170 info_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
1171 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1172 }
1173
1174 /* save the pointer to the info entry of the current tables */
1175 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1176 }
1177
1178 /* print input tables */
1179 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1180 sep_shared_area_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1181
1182 /* the array of the pages */
1183 kfree(lli_array_ptr);
1184 end_function:
1185 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1186 return 0;
1187
1188 }
1189
1190 /*
1191 This function creates the input and output dma tables for
1192 symmetric operations (AES/DES) according to the block size from LLI arays
1193 */
static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
				      struct sep_lli_entry_t *lli_in_array,
				      unsigned long sep_in_lli_entries,
				      struct sep_lli_entry_t *lli_out_array,
				      unsigned long sep_out_lli_entries,
				      unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
{
	/* points to the area where next lli table can be allocated: keep void *
	   as there is pointer scaling to fix otherwise */
	void *lli_table_alloc_addr;
	/* input lli table */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* output lli table */
	struct sep_lli_entry_t *out_lli_table_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_in_entry_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_out_entry_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_in_entry;
	/* points to the first entry to be processed in the lli_out_array */
	unsigned long current_out_entry;
	/* max size of the input table */
	unsigned long in_table_data_size;
	/* max size of the output table */
	unsigned long out_table_data_size;
	/* flag that signifies if this is the first tables build from the arrays */
	unsigned long first_table_flag;
	/* the data size that should be in table */
	unsigned long table_data_size;
	/* number of entries in the input table */
	unsigned long num_entries_in_table;
	/* number of entries in the output table */
	unsigned long num_entries_out_table;

	dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	current_in_entry = 0;
	current_out_entry = 0;
	first_table_flag = 1;
	info_in_entry_ptr = 0;
	info_out_entry_ptr = 0;

	/* loop till all the entries in the in array are processed; input and
	   output tables are built pairwise so each pair covers the same
	   amount of data */
	while (current_in_entry < sep_in_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* set the first output tables */
		out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));

		/* calculate the maximum size of data for output table */
		out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));

		edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
		edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);

		/* check where the data is smallest - both tables must carry
		   the same amount of data */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		/* now calculate the table size so that it will be a multiple
		   of the block size */
		table_data_size = (table_data_size / block_size) * block_size;

		dbg("SEP Driver:table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);

		/* construct output lli table */
		sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);

		/* if info entry is null - this is the first table built */
		if (info_in_entry_ptr == 0) {
			/* set the output parameters to physical addresses */
			*lli_table_in_ptr = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
			*in_num_entries_ptr = num_entries_in_table;
			*lli_table_out_ptr = sep_shared_area_virt_to_bus(sep, out_lli_table_ptr);
			*out_num_entries_ptr = num_entries_out_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
			edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
		} else {
			/* update the info entry of the previous in table to
			   chain to the new one */
			info_in_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
			info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);

			/* update the info entry of the previous out table */
			info_out_entry_ptr->physical_address = sep_shared_area_virt_to_bus(sep, out_lli_table_ptr);
			info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;

		edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
		edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
		edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
	/* print output tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
	dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
	return 0;
}
1316
1317
1318 /*
1319 This function builds input and output DMA tables for synhronic
1320 symmetric operations (AES, DES). It also checks that each table
1321 is of the modular block size
1322 */
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
				       unsigned long app_virt_in_addr,
				       unsigned long app_virt_out_addr,
				       unsigned long data_size,
				       unsigned long block_size,
				       unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
{
	/* array of lli entries describing the locked input pages */
	struct sep_lli_entry_t *lli_in_array;
	/* array of lli entries describing the locked output pages */
	struct sep_lli_entry_t *lli_out_array;
	int result = 0;

	dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");

	/* initialize the pages pointers */
	sep->in_page_array = 0;
	sep->out_page_array = 0;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true) {
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
			goto end_function;
		}
	} else {
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
			goto end_function;
		}
	}

	/* lock the output buffer the same way.
	   NOTE(review): if this second lock fails we free lli_in_array but
	   the input pages stay pinned (sep->in_page_array remains set);
	   presumably they are released later via
	   sep_free_dma_table_data_handler - verify against the callers */
	if (isKernelVirtualAddress == true) {
		result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	} else {
		result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	}
	edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
	edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
	edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);


	/* call the function that creates the tables from the lli arrays */
	result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
	if (result) {
		edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
		goto end_function_with_error2;
	}

	/* fall through - free the lli entry arrays */
	dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
	dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
	dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
end_function_with_error2:
	kfree(lli_out_array);
end_function_with_error1:
	kfree(lli_in_array);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
	return result;

}
1397
1398 /*
1399 this function handles tha request for creation of the DMA table
1400 for the synchronic symmetric operations (AES,DES)
1401 */
1402 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1403 unsigned long arg)
1404 {
1405 int error;
1406 /* command arguments */
1407 struct sep_driver_build_sync_table_t command_args;
1408
1409 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1410
1411 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1412 if (error)
1413 goto end_function;
1414
1415 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1416 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1417 edbg("data_size is %lu\n", command_args.data_in_size);
1418 edbg("block_size is %lu\n", command_args.block_size);
1419
1420 /* check if we need to build only input table or input/output */
1421 if (command_args.app_out_address)
1422 /* prepare input and output tables */
1423 error = sep_prepare_input_output_dma_table(sep,
1424 command_args.app_in_address,
1425 command_args.app_out_address,
1426 command_args.data_in_size,
1427 command_args.block_size,
1428 &command_args.in_table_address,
1429 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1430 else
1431 /* prepare input tables */
1432 error = sep_prepare_input_dma_table(sep,
1433 command_args.app_in_address,
1434 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1435
1436 if (error)
1437 goto end_function;
1438 /* copy to user */
1439 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1440 error = -EFAULT;
1441 end_function:
1442 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1443 return error;
1444 }
1445
1446 /*
1447 this function handles the request for freeing dma table for synhronic actions
1448 */
1449 static int sep_free_dma_table_data_handler(struct sep_device *sep)
1450 {
1451 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1452
1453 /* free input pages array */
1454 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1455
1456 /* free output pages array if needed */
1457 if (sep->out_page_array)
1458 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1459
1460 /* reset all the values */
1461 sep->in_page_array = 0;
1462 sep->out_page_array = 0;
1463 sep->in_num_pages = 0;
1464 sep->out_num_pages = 0;
1465 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1466 return 0;
1467 }
1468
1469 /*
1470 this function find a space for the new flow dma table
1471 */
1472 static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1473 unsigned long **table_address_ptr)
1474 {
1475 int error = 0;
1476 /* pointer to the id field of the flow dma table */
1477 unsigned long *start_table_ptr;
1478 /* Do not make start_addr unsigned long * unless fixing the offset
1479 computations ! */
1480 void *flow_dma_area_start_addr;
1481 unsigned long *flow_dma_area_end_addr;
1482 /* maximum table size in words */
1483 unsigned long table_size_in_words;
1484
1485 /* find the start address of the flow DMA table area */
1486 flow_dma_area_start_addr = sep->shared_area + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1487
1488 /* set end address of the flow table area */
1489 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1490
1491 /* set table size in words */
1492 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1493
1494 /* set the pointer to the start address of DMA area */
1495 start_table_ptr = flow_dma_area_start_addr;
1496
1497 /* find the space for the next table */
1498 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1499 start_table_ptr += table_size_in_words;
1500
1501 /* check if we reached the end of floa tables area */
1502 if (start_table_ptr >= flow_dma_area_end_addr)
1503 error = -1;
1504 else
1505 *table_address_ptr = start_table_ptr;
1506
1507 return error;
1508 }
1509
1510 /*
1511 This function creates one DMA table for flow and returns its data,
1512 and pointer to its info entry
1513 */
static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
					unsigned long virt_buff_addr,
					unsigned long virt_buff_size,
					struct sep_lli_entry_t *table_data,
					struct sep_lli_entry_t **info_entry_ptr,
					struct sep_flow_context_t *flow_data_ptr,
					bool isKernelVirtualAddress)
{
	int error;
	/* the range in pages */
	unsigned long lli_array_size;
	struct sep_lli_entry_t *lli_array;
	struct sep_lli_entry_t *flow_dma_table_entry_ptr;
	unsigned long *start_dma_table_ptr;
	/* total table data counter */
	unsigned long dma_table_data_count;
	/* pointer that will keep the pointer to the pages of the virtual buffer */
	struct page **page_array_ptr;
	unsigned long entry_count;

	/* find the space for the new table */
	error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
	if (error)
		goto end_function;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock kernel buffer in the memory */
		error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
	else
		/* lock user buffer in the memory */
		error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);

	if (error)
		goto end_function;

	/* table layout: word [0] = entry count, word [1] = pointer to the
	   page array, then the lli entries followed by one info entry.
	   writing a non-zero entry count here marks this table slot as
	   taken (see sep_find_free_flow_dma_table_space) */
	*start_dma_table_ptr = lli_array_size;

	/* point to the place of the pages pointers of the table */
	start_dma_table_ptr++;

	/* set the pages pointer (kept so the pages can be released later by
	   sep_deallocated_flow_tables) */
	*start_dma_table_ptr = (unsigned long) page_array_ptr;

	/* set the pointer to the first entry */
	flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);

	/* now create the entries for table */
	for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
		flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;

		flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;

		/* set the total data of a table */
		dma_table_data_count += lli_array[entry_count].block_size;

		flow_dma_table_entry_ptr++;
	}

	/* set the physical address - of the first lli entry, i.e. past the
	   two header words */
	table_data->physical_address = virt_to_phys(start_dma_table_ptr);

	/* set the num_entries and total data size; the +1 accounts for the
	   info entry appended below */
	table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);

	/* set the info entry */
	flow_dma_table_entry_ptr->physical_address = 0xffffffff;
	flow_dma_table_entry_ptr->block_size = 0;

	/* set the pointer to info entry */
	*info_entry_ptr = flow_dma_table_entry_ptr;

	/* the array of the lli entries is no longer needed once copied */
	kfree(lli_array);
end_function:
	return error;
}
1593
1594
1595
1596 /*
1597 This function creates a list of tables for flow and returns the data for
1598 the first and last tables of the list
1599 */
1600 static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1601 unsigned long num_virtual_buffers,
1602 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1603 {
1604 int error;
1605 unsigned long virt_buff_addr;
1606 unsigned long virt_buff_size;
1607 struct sep_lli_entry_t table_data;
1608 struct sep_lli_entry_t *info_entry_ptr;
1609 struct sep_lli_entry_t *prev_info_entry_ptr;
1610 unsigned long i;
1611
1612 /* init vars */
1613 error = 0;
1614 prev_info_entry_ptr = 0;
1615
1616 /* init the first table to default */
1617 table_data.physical_address = 0xffffffff;
1618 first_table_data_ptr->physical_address = 0xffffffff;
1619 table_data.block_size = 0;
1620
1621 for (i = 0; i < num_virtual_buffers; i++) {
1622 /* get the virtual buffer address */
1623 error = get_user(virt_buff_addr, &first_buff_addr);
1624 if (error)
1625 goto end_function;
1626
1627 /* get the virtual buffer size */
1628 first_buff_addr++;
1629 error = get_user(virt_buff_size, &first_buff_addr);
1630 if (error)
1631 goto end_function;
1632
1633 /* advance the address to point to the next pair of address|size */
1634 first_buff_addr++;
1635
1636 /* now prepare the one flow LLI table from the data */
1637 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1638 if (error)
1639 goto end_function;
1640
1641 if (i == 0) {
1642 /* if this is the first table - save it to return to the user
1643 application */
1644 *first_table_data_ptr = table_data;
1645
1646 /* set the pointer to info entry */
1647 prev_info_entry_ptr = info_entry_ptr;
1648 } else {
1649 /* not first table - the previous table info entry should
1650 be updated */
1651 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1652
1653 /* set the pointer to info entry */
1654 prev_info_entry_ptr = info_entry_ptr;
1655 }
1656 }
1657
1658 /* set the last table data */
1659 *last_table_data_ptr = table_data;
1660 end_function:
1661 return error;
1662 }
1663
1664 /*
1665 this function goes over all the flow tables connected to the given
1666 table and deallocate them
1667 */
static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
{
	/* id pointer */
	unsigned long *table_ptr;
	/* number of data entries in the current table */
	unsigned long num_entries;
	unsigned long num_pages;
	struct page **pages_ptr;
	/* info entry (last entry) of the current table */
	struct sep_lli_entry_t *info_entry_ptr;

	/* set the pointer to the first table.
	   NOTE(review): physical_address is dereferenced directly as a
	   virtual pointer here - presumably these flow tables live in
	   identity-mapped memory; verify against
	   sep_prepare_one_flow_dma_table which stores virt_to_phys() */
	table_ptr = (unsigned long *) first_table_ptr->physical_address;

	/* set the num of entries */
	num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
	    & SEP_NUM_ENTRIES_MASK;

	/* go over all the connected tables; the chain ends at a link of
	   0xffffffff */
	while (*table_ptr != 0xffffffff) {
		/* get number of pages - stored two words before the first
		   entry (see the table layout in
		   sep_prepare_one_flow_dma_table) */
		num_pages = *(table_ptr - 2);

		/* get the pointer to the pages - stored one word before the
		   first entry */
		pages_ptr = (struct page **) (*(table_ptr - 1));

		/* free the pages */
		sep_free_dma_pages(pages_ptr, num_pages, 1);

		/* goto to the info entry - the last entry of the table */
		info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);

		/* follow the link to the next table in the chain */
		table_ptr = (unsigned long *) info_entry_ptr->physical_address;
		num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
	}

	return;
}
1706
1707 /**
1708 * sep_find_flow_context - find a flow
1709 * @sep: the SEP we are working with
1710 * @flow_id: flow identifier
1711 *
1712 * Returns a pointer the matching flow, or NULL if the flow does not
1713 * exist.
1714 */
1715
1716 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1717 unsigned long flow_id)
1718 {
1719 int count;
1720 /*
1721 * always search for flow with id default first - in case we
1722 * already started working on the flow there can be no situation
1723 * when 2 flows are with default flag
1724 */
1725 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1726 if (sep->flows[count].flow_id == flow_id)
1727 return &sep->flows[count];
1728 }
1729 return NULL;
1730 }
1731
1732
1733 /*
1734 this function handles the request to create the DMA tables for flow
1735 */
1736 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1737 unsigned long arg)
1738 {
1739 int error;
1740 struct sep_driver_build_flow_table_t command_args;
1741 /* first table - output */
1742 struct sep_lli_entry_t first_table_data;
1743 /* dma table data */
1744 struct sep_lli_entry_t last_table_data;
1745 /* pointer to the info entry of the previuos DMA table */
1746 struct sep_lli_entry_t *prev_info_entry_ptr;
1747 /* pointer to the flow data strucutre */
1748 struct sep_flow_context_t *flow_context_ptr;
1749
1750 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1751
1752 /* init variables */
1753 prev_info_entry_ptr = 0;
1754 first_table_data.physical_address = 0xffffffff;
1755
1756 /* find the free structure for flow data */
1757 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1758 if (flow_context_ptr == NULL)
1759 goto end_function;
1760
1761 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1762 if (error)
1763 goto end_function;
1764
1765 /* create flow tables */
1766 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1767 if (error)
1768 goto end_function_with_error;
1769
1770 /* check if flow is static */
1771 if (!command_args.flow_type)
1772 /* point the info entry of the last to the info entry of the first */
1773 last_table_data = first_table_data;
1774
1775 /* set output params */
1776 command_args.first_table_addr = first_table_data.physical_address;
1777 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1778 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1779
1780 /* send the parameters to user application */
1781 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1782 if (error)
1783 goto end_function_with_error;
1784
1785 /* all the flow created - update the flow entry with temp id */
1786 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1787
1788 /* set the processing tables data in the context */
1789 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1790 flow_context_ptr->input_tables_in_process = first_table_data;
1791 else
1792 flow_context_ptr->output_tables_in_process = first_table_data;
1793
1794 goto end_function;
1795
1796 end_function_with_error:
1797 /* free the allocated tables */
1798 sep_deallocated_flow_tables(&first_table_data);
1799 end_function:
1800 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1801 return error;
1802 }
1803
1804 /*
1805 this function handles add tables to flow
1806 */
1807 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1808 {
1809 int error;
1810 unsigned long num_entries;
1811 struct sep_driver_add_flow_table_t command_args;
1812 struct sep_flow_context_t *flow_context_ptr;
1813 /* first dma table data */
1814 struct sep_lli_entry_t first_table_data;
1815 /* last dma table data */
1816 struct sep_lli_entry_t last_table_data;
1817 /* pointer to the info entry of the current DMA table */
1818 struct sep_lli_entry_t *info_entry_ptr;
1819
1820 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1821
1822 /* get input parameters */
1823 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1824 if (error)
1825 goto end_function;
1826
1827 /* find the flow structure for the flow id */
1828 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1829 if (flow_context_ptr == NULL)
1830 goto end_function;
1831
1832 /* prepare the flow dma tables */
1833 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1834 if (error)
1835 goto end_function_with_error;
1836
1837 /* now check if there is already an existing add table for this flow */
1838 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1839 /* this buffer was for input buffers */
1840 if (flow_context_ptr->input_tables_flag) {
1841 /* add table already exists - add the new tables to the end
1842 of the previous */
1843 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1844
1845 info_entry_ptr = (struct sep_lli_entry_t *)
1846 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1847
1848 /* connect to list of tables */
1849 *info_entry_ptr = first_table_data;
1850
1851 /* set the first table data */
1852 first_table_data = flow_context_ptr->first_input_table;
1853 } else {
1854 /* set the input flag */
1855 flow_context_ptr->input_tables_flag = 1;
1856
1857 /* set the first table data */
1858 flow_context_ptr->first_input_table = first_table_data;
1859 }
1860 /* set the last table data */
1861 flow_context_ptr->last_input_table = last_table_data;
1862 } else { /* this is output tables */
1863
1864 /* this buffer was for input buffers */
1865 if (flow_context_ptr->output_tables_flag) {
1866 /* add table already exists - add the new tables to
1867 the end of the previous */
1868 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1869
1870 info_entry_ptr = (struct sep_lli_entry_t *)
1871 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1872
1873 /* connect to list of tables */
1874 *info_entry_ptr = first_table_data;
1875
1876 /* set the first table data */
1877 first_table_data = flow_context_ptr->first_output_table;
1878 } else {
1879 /* set the input flag */
1880 flow_context_ptr->output_tables_flag = 1;
1881
1882 /* set the first table data */
1883 flow_context_ptr->first_output_table = first_table_data;
1884 }
1885 /* set the last table data */
1886 flow_context_ptr->last_output_table = last_table_data;
1887 }
1888
1889 /* set output params */
1890 command_args.first_table_addr = first_table_data.physical_address;
1891 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1892 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1893
1894 /* send the parameters to user application */
1895 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1896 end_function_with_error:
1897 /* free the allocated tables */
1898 sep_deallocated_flow_tables(&first_table_data);
1899 end_function:
1900 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1901 return error;
1902 }
1903
1904 /*
1905 this function add the flow add message to the specific flow
1906 */
1907 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1908 {
1909 int error;
1910 struct sep_driver_add_message_t command_args;
1911 struct sep_flow_context_t *flow_context_ptr;
1912
1913 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1914
1915 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
1916 if (error)
1917 goto end_function;
1918
1919 /* check input */
1920 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1921 error = -ENOMEM;
1922 goto end_function;
1923 }
1924
1925 /* find the flow context */
1926 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1927 if (flow_context_ptr == NULL)
1928 goto end_function;
1929
1930 /* copy the message into context */
1931 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1932 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1933 end_function:
1934 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1935 return error;
1936 }
1937
1938
1939 /*
1940 this function returns the bus and virtual addresses of the static pool
1941 */
1942 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
1943 {
1944 int error;
1945 struct sep_driver_static_pool_addr_t command_args;
1946
1947 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1948
1949 /*prepare the output parameters in the struct */
1950 command_args.physical_static_address = sep->shared_area_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1951 command_args.virtual_static_address = (unsigned long)sep->shared_area + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1952
1953 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1954
1955 /* send the parameters to user application */
1956 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
1957 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
1958 return error;
1959 }
1960
1961 /*
1962 this address gets the offset of the physical address from the start
1963 of the mapped area
1964 */
1965 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
1966 {
1967 int error;
1968 struct sep_driver_get_mapped_offset_t command_args;
1969
1970 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
1971
1972 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
1973 if (error)
1974 goto end_function;
1975
1976 if (command_args.physical_address < sep->shared_area_bus) {
1977 error = -EINVAL;
1978 goto end_function;
1979 }
1980
1981 /*prepare the output parameters in the struct */
1982 command_args.offset = command_args.physical_address - sep->shared_area_bus;
1983
1984 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
1985
1986 /* send the parameters to user application */
1987 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
1988 end_function:
1989 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
1990 return error;
1991 }
1992
1993
/*
  this function handles the SEP start request: it busy-waits until the
  SEP posts a non-zero value in GPR3, and if that value signals a fatal
  error (0x1) it returns the error status read from GPR0; otherwise 0
*/
static int sep_start_handler(struct sep_device *sep)
{
	unsigned long reg_val;
	unsigned long error = 0;

	dbg("SEP Driver:--------> sep_start_handler start\n");

	/* wait in polling for message from SEP */
	/* NOTE(review): tight poll with no timeout or cpu_relax() - this
	   hangs the CPU if the SEP never responds */
	do
		reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	while (!reg_val);

	/* check the value */
	if (reg_val == 0x1)
		/* fatal error - read error status from GPRO */
		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
	dbg("SEP Driver:<-------- sep_start_handler end\n");
	return error;
}
2016
2017 /*
2018 this function handles the request for SEP initialization
2019 */
2020 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2021 {
2022 unsigned long message_word;
2023 unsigned long *message_ptr;
2024 struct sep_driver_init_t command_args;
2025 unsigned long counter;
2026 unsigned long error;
2027 unsigned long reg_val;
2028
2029 dbg("SEP Driver:--------> sep_init_handler start\n");
2030 error = 0;
2031
2032 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2033
2034 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2035
2036 if (error)
2037 goto end_function;
2038
2039 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2040 /*sep_configure_dma_burst(); */
2041
2042 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2043
2044 message_ptr = (unsigned long *) command_args.message_addr;
2045
2046 /* set the base address of the SRAM */
2047 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2048
2049 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2050 get_user(message_word, message_ptr);
2051 /* write data to SRAM */
2052 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2053 edbg("SEP Driver:message_word is %lu\n", message_word);
2054 /* wait for write complete */
2055 sep_wait_sram_write(sep);
2056 }
2057 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
2058 /* signal SEP */
2059 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2060
2061 do
2062 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2063 while (!(reg_val & 0xFFFFFFFD));
2064
2065 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2066
2067 /* check the value */
2068 if (reg_val == 0x1) {
2069 edbg("SEP Driver:init failed\n");
2070
2071 error = sep_read_reg(sep, 0x8060);
2072 edbg("SEP Driver:sw monitor is %lu\n", error);
2073
2074 /* fatal error - read erro status from GPRO */
2075 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2076 edbg("SEP Driver:error is %lu\n", error);
2077 }
2078 end_function:
2079 dbg("SEP Driver:<-------- sep_init_handler end\n");
2080 return error;
2081
2082 }
2083
2084 /*
2085 this function handles the request cache and resident reallocation
2086 */
2087 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2088 unsigned long arg)
2089 {
2090 struct sep_driver_realloc_cache_resident_t command_args;
2091 int error;
2092
2093 /* copy cache and resident to the their intended locations */
2094 error = sep_load_firmware(sep);
2095 if (error)
2096 return error;
2097
2098 command_args.new_base_addr = sep->shared_area_bus;
2099
2100 /* find the new base address according to the lowest address between
2101 cache, resident and shared area */
2102 if (sep->resident_bus < command_args.new_base_addr)
2103 command_args.new_base_addr = sep->resident_bus;
2104 if (sep->cache_bus < command_args.new_base_addr)
2105 command_args.new_base_addr = sep->cache_bus;
2106
2107 /* set the return parameters */
2108 command_args.new_cache_addr = sep->cache_bus;
2109 command_args.new_resident_addr = sep->resident_bus;
2110
2111 /* set the new shared area */
2112 command_args.new_shared_area_addr = sep->shared_area_bus;
2113
2114 edbg("SEP Driver:command_args.new_shared_area is %08llx\n", command_args.new_shared_area_addr);
2115 edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
2116 edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
2117 edbg("SEP Driver:command_args.new_cache_addr is %08llx\n", command_args.new_cache_addr);
2118
2119 /* return to user */
2120 if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
2121 return -EFAULT;
2122 return 0;
2123 }
2124
2125 /*
2126 this function handles the request for get time
2127 */
2128 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2129 {
2130 int error;
2131 struct sep_driver_get_time_t command_args;
2132
2133 error = sep_set_time(sep, &command_args.time_physical_address, &command_args.time_value);
2134 if (error == 0)
2135 error = copy_to_user((void __user *)arg,
2136 &command_args, sizeof(struct sep_driver_get_time_t));
2137 return error;
2138
2139 }
2140
/*
  This API handles the end transaction request; the teardown work
  (masking the IMR, releasing the IRQ and the mutex) is currently
  compiled out, so this is effectively a no-op that returns 0
*/
static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
{
	dbg("SEP Driver:--------> sep_end_transaction_handler start\n");

#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* close IMR */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);

	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, sep);

	/* release the sep mutex (the old comment said "lock", but this is
	   an unlock) */
	mutex_unlock(&sep_mutex);
#endif

	dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");

	return 0;
}
2163
2164
2165 /*
2166 This function handler the set flow id command
2167 */
2168 static int sep_set_flow_id_handler(struct sep_device *sep, unsigned long arg)
2169 {
2170 int error;
2171 unsigned long flow_id;
2172 struct sep_flow_context_t *flow_data_ptr;
2173
2174 dbg("------------>SEP Driver: sep_set_flow_id_handler start\n");
2175
2176 error = get_user(flow_id, &(((struct sep_driver_set_flow_id_t *) arg)->flow_id));
2177 if (error)
2178 goto end_function;
2179
2180 /* find the flow data structure that was just used for creating new flow
2181 - its id should be default */
2182 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2183 if (flow_data_ptr == NULL)
2184 goto end_function;
2185
2186 /* set flow id */
2187 flow_data_ptr->flow_id = flow_id;
2188
2189 end_function:
2190 dbg("SEP Driver:<-------- sep_set_flow_id_handler end\n");
2191 return error;
2192 }
2193
2194
2195
2196
2197
/*
  ioctl entry point of the driver: dispatches each SEP ioctl command to
  its handler; returns 0 or a negative errno (-ENOTTY for unknown cmds)
*/
static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	int error = 0;
	struct sep_device *sep = filp->private_data;

	dbg("------------>SEP Driver: ioctl start\n");

	edbg("SEP Driver: cmd is %x\n", cmd);

	switch (cmd) {
	case SEP_IOCSENDSEPCOMMAND:
		/* send command to SEP */
		sep_send_command_handler(sep);
		edbg("SEP Driver: after sep_send_command_handler\n");
		break;
	case SEP_IOCSENDSEPRPLYCOMMAND:
		/* send reply command to SEP */
		sep_send_reply_command_handler(sep);
		break;
	case SEP_IOCALLOCDATAPOLL:
		/* allocate data pool */
		error = sep_allocate_data_pool_memory_handler(sep, arg);
		break;
	case SEP_IOCWRITEDATAPOLL:
		/* write data into memory pool */
		error = sep_write_into_data_pool_handler(sep, arg);
		break;
	case SEP_IOCREADDATAPOLL:
		/* read data from data pool into application memory */
		error = sep_read_from_data_pool_handler(sep, arg);
		break;
	case SEP_IOCCREATESYMDMATABLE:
		/* create dma table for synhronic operation */
		error = sep_create_sync_dma_tables_handler(sep, arg);
		break;
	case SEP_IOCCREATEFLOWDMATABLE:
		/* create flow dma tables */
		error = sep_create_flow_dma_tables_handler(sep, arg);
		break;
	case SEP_IOCFREEDMATABLEDATA:
		/* free the pages */
		error = sep_free_dma_table_data_handler(sep);
		break;
	case SEP_IOCSETFLOWID:
		/* set flow id */
		error = sep_set_flow_id_handler(sep, arg);
		break;
	case SEP_IOCADDFLOWTABLE:
		/* add tables to the dynamic flow */
		error = sep_add_flow_tables_handler(sep, arg);
		break;
	case SEP_IOCADDFLOWMESSAGE:
		/* add message of add tables to flow */
		error = sep_add_flow_tables_message_handler(sep, arg);
		break;
	case SEP_IOCSEPSTART:
		/* start command to sep */
		error = sep_start_handler(sep);
		break;
	case SEP_IOCSEPINIT:
		/* init command to sep */
		error = sep_init_handler(sep, arg);
		break;
	case SEP_IOCGETSTATICPOOLADDR:
		/* get the physical and virtual addresses of the static pool */
		error = sep_get_static_pool_addr_handler(sep, arg);
		break;
	case SEP_IOCENDTRANSACTION:
		error = sep_end_transaction_handler(sep, arg);
		break;
	case SEP_IOCREALLOCCACHERES:
		error = sep_realloc_cache_resident_handler(sep, arg);
		break;
	case SEP_IOCGETMAPPEDADDROFFSET:
		error = sep_get_physical_mapped_offset_handler(sep, arg);
		break;
	case SEP_IOCGETIME:
		error = sep_get_time_handler(sep, arg);
		break;
	default:
		error = -ENOTTY;
		break;
	}
	dbg("SEP Driver:<-------- ioctl end\n");
	return error;
}
2284
2285
2286
2287 #if !SEP_DRIVER_POLLING_MODE
2288
/* handler for flow done interrupt: work item that frees the flow's
   in-process DMA tables and, if more input tables are queued, passes
   the flow message to the SEP */

static void sep_flow_done_handler(struct work_struct *work)
{
	struct sep_flow_context_t *flow_data_ptr;

	/* obtain the mutex */
	mutex_lock(&sep_mutex);

	/* get the pointer to context */
	/* NOTE(review): this cast assumes the work_struct is the FIRST member
	   of struct sep_flow_context_t - confirm against the struct layout
	   (container_of would be the safe idiom) */
	flow_data_ptr = (struct sep_flow_context_t *) work;

	/* free all the current input tables in sep */
	sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);

	/* free all the current tables output tables in SEP (if needed) */
	if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
		sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);

	/* check if we have additional tables to be sent to SEP only input
	   flag may be checked */
	if (flow_data_ptr->input_tables_flag) {
		/* copy the message to the shared RAM and signal SEP */
		/* NOTE(review): the memcpy below copies FROM the shared area
		   INTO the flow message buffer - the opposite of what this
		   comment says; also 'sep' here must be a file-scope device
		   pointer, not a local - verify both */
		memcpy((void *) flow_data_ptr->message, (void *) sep->shared_area, flow_data_ptr->message_size_in_bytes);

		sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
	}
	mutex_unlock(&sep_mutex);
}
/*
  interrupt handler function: distinguishes flow-done interrupts (queued
  to the flow workqueue; currently compiled out) from SEP reply
  interrupts (bump reply counter and wake waiters), then acks via ICR
*/
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	irqreturn_t int_error;
	unsigned long reg_val;
	unsigned long flow_id;
	struct sep_flow_context_t *flow_context_ptr;
	struct sep_device *sep = dev_id;

	int_error = IRQ_HANDLED;

	/* read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
	edbg("SEP Interrupt - reg is %08lx\n", reg_val);

	/* check if this is the flow interrupt */
	/* NOTE(review): the flow branch is disabled by the literal 0 below,
	   so the code inside it is currently dead */
	if (0 /*reg_val & (0x1 << 11) */ ) {
		/* read GPRO to find out the which flow is done */
		flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

		/* find the contex of the flow */
		flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
		if (flow_context_ptr == NULL)
			goto end_function_with_error;

		/* queue the work */
		INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
		queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);

	} else {
		/* check if this is reply interrupt from SEP */
		if (reg_val & (0x1 << 13)) {
			/* update the counter of reply messages */
			sep->reply_ct++;
			/* wake up the waiting process */
			wake_up(&sep_event);
		} else {
			/* not ours - leave the interrupt unacknowledged */
			int_error = IRQ_NONE;
			goto end_function;
		}
	}
end_function_with_error:
	/* clear the interrupt (also reached by falling through from the
	   reply-interrupt path above) */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
end_function:
	return int_error;
}
2367
2368 #endif
2369
2370
2371
2372 #if 0
2373
/* busy-wait until the SEP busy register reads zero (no timeout);
   currently unused - kept inside the surrounding #if 0 */
static void sep_wait_busy(struct sep_device *sep)
{
	u32 reg;

	do {
		reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
	} while (reg);
}
2382
/*
  PATCH for configuring the DMA to single burst instead of multi-burst:
  requests register access from the SEP via GPR0, writes the burst
  register, then releases access; currently unused (inside #if 0)
*/
static void sep_configure_dma_burst(struct sep_device *sep)
{
#define 	 HW_AHB_RD_WR_BURSTS_REG_ADDR 		 0x0E10UL

	dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");

	/* request access to registers from SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

	dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");

	sep_wait_busy(sep);

	dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");

	/* set the DMA burst register to single burst */
	sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);

	/* release the sep busy */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
	sep_wait_busy(sep);

	dbg("SEP Driver:<-------- sep_configure_dma_burst done  \n");

}
2411
2412 #endif
2413
2414 /*
2415 Function that is activaed on the succesful probe of the SEP device
2416 */
2417 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2418 {
2419 int error = 0;
2420 struct sep_device *sep;
2421 int counter;
2422 int size; /* size of memory for allocation */
2423
2424 edbg("Sep pci probe starting\n");
2425 if (sep_dev != NULL) {
2426 dev_warn(&pdev->dev, "only one SEP supported.\n");
2427 return -EBUSY;
2428 }
2429
2430 /* enable the device */
2431 error = pci_enable_device(pdev);
2432 if (error) {
2433 edbg("error enabling pci device\n");
2434 goto end_function;
2435 }
2436
2437 /* set the pci dev pointer */
2438 sep_dev = &sep_instance;
2439 sep = &sep_instance;
2440
2441 edbg("sep->shared_area = %lx\n", (unsigned long) &sep->shared_area);
2442 /* transaction counter that coordinates the transactions between SEP
2443 and HOST */
2444 sep->send_ct = 0;
2445 /* counter for the messages from sep */
2446 sep->reply_ct = 0;
2447 /* counter for the number of bytes allocated in the pool
2448 for the current transaction */
2449 sep->data_pool_bytes_allocated = 0;
2450
2451 /* calculate the total size for allocation */
2452 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2453 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2454
2455 /* allocate the shared area */
2456 if (sep_map_and_alloc_shared_area(sep, size)) {
2457 error = -ENOMEM;
2458 /* allocation failed */
2459 goto end_function_error;
2460 }
2461 /* now set the memory regions */
2462 sep->message_shared_area_addr = sep->shared_area;
2463
2464 edbg("SEP Driver: sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);
2465
2466 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2467 /* send the new SHARED MESSAGE AREA to the SEP */
2468 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_area_bus);
2469
2470 /* poll for SEP response */
2471 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2472 while (retval != 0xffffffff && retval != sep->shared_area_bus)
2473 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2474
2475 /* check the return value (register) */
2476 if (retval != sep->shared_area_bus) {
2477 error = -ENOMEM;
2478 goto end_function_deallocate_sep_shared_area;
2479 }
2480 #endif
2481 /* init the flow contextes */
2482 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2483 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2484
2485 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2486 if (sep->flow_wq == NULL) {
2487 error = -ENOMEM;
2488 edbg("sep_driver:flow queue creation failed\n");
2489 goto end_function_deallocate_sep_shared_area;
2490 }
2491 edbg("SEP Driver: create flow workqueue \n");
2492 /* load the rom code */
2493 sep_load_rom_code(sep);
2494
2495 sep->pdev = pci_dev_get(pdev);
2496
2497 /* get the io memory start address */
2498 sep->io_bus = pci_resource_start(pdev, 0);
2499 if (!sep->io_bus) {
2500 edbg("SEP Driver error pci resource start\n");
2501 goto end_function_deallocate_sep_shared_area;
2502 }
2503
2504 /* get the io memory end address */
2505 sep->io_end_bus = pci_resource_end(pdev, 0);
2506 if (!sep->io_end_bus) {
2507 edbg("SEP Driver error pci resource end\n");
2508 goto end_function_deallocate_sep_shared_area;
2509 }
2510
2511 sep->io_memory_size = sep->io_end_bus - sep->io_bus + 1;
2512
2513 edbg("SEP Driver:io_bus is %08lx\n", sep->io_bus);
2514
2515 edbg("SEP Driver:io_memory_end_phyaical_address is %08lx\n", sep->io_end_bus);
2516
2517 edbg("SEP Driver:io_memory_size is %08lx\n", sep->io_memory_size);
2518
2519 sep->io_addr = ioremap_nocache(sep->io_bus, sep->io_memory_size);
2520 if (!sep->io_addr) {
2521 edbg("SEP Driver error ioremap of io memory\n");
2522 goto end_function_deallocate_sep_shared_area;
2523 }
2524
2525 edbg("SEP Driver:io_addr is %p\n", sep->io_addr);
2526
2527 sep->reg_addr = (void __iomem *) sep->io_addr;
2528
2529 /* set up system base address and shared memory location */
2530
2531 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2532 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2533 &sep->rar_bus, GFP_KERNEL);
2534
2535 if (!sep->rar_addr) {
2536 edbg("SEP Driver:can't allocate rar\n");
2537 goto end_function_uniomap;
2538 }
2539
2540 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2541 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2542
2543 #if !SEP_DRIVER_POLLING_MODE
2544
2545 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2546
2547 /* clear ICR register */
2548 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2549
2550 /* set the IMR register - open only GPR 2 */
2551 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2552
2553 edbg("SEP Driver: about to call request_irq\n");
2554 /* get the interrupt line */
2555 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2556 if (error)
2557 goto end_function_free_res;
2558
2559 goto end_function;
2560 edbg("SEP Driver: about to write IMR REG_ADDR");
2561
2562 /* set the IMR register - open only GPR 2 */
2563 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2564
2565 end_function_free_res:
2566 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2567 sep->rar_addr, sep->rar_bus);
2568 #endif /* SEP_DRIVER_POLLING_MODE */
2569 end_function_uniomap:
2570 iounmap(sep->io_addr);
2571 end_function_deallocate_sep_shared_area:
2572 /* de-allocate shared area */
2573 sep_unmap_and_free_shared_area(sep, size);
2574 end_function_error:
2575 sep_dev = NULL;
2576 end_function:
2577 return error;
2578 }
2579
/* PCI IDs this driver binds to: the Intel SEP security co-processor */
static struct pci_device_id sep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
	{0}
};

MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2586
/* field for registering driver to PCI device */
static struct pci_driver sep_pci_driver = {
	.name = "sep_sec_driver",
	.id_table = sep_pci_id_tbl,
	.probe = sep_probe
	/* FIXME: remove handler - without a .remove callback the resources
	   acquired in sep_probe are never released on device removal */
};
2594
/* major and minor device numbers allocated in sep_register_driver_to_fs */
static dev_t sep_devno;

/* the file operations structure of the driver; entry points are defined
   earlier in this file */
static struct file_operations sep_file_operations = {
	.owner = THIS_MODULE,
	.ioctl = sep_ioctl,
	.poll = sep_poll,
	.open = sep_open,
	.release = sep_release,
	.mmap = sep_mmap,
};


/* cdev struct of the driver */
static struct cdev sep_cdev;
2611
2612 /*
2613 this function registers the driver to the file system
2614 */
2615 static int sep_register_driver_to_fs(void)
2616 {
2617 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2618 if (ret_val) {
2619 edbg("sep_driver:major number allocation failed, retval is %d\n", ret_val);
2620 goto end_function;
2621 }
2622
2623 /* init cdev */
2624 cdev_init(&sep_cdev, &sep_file_operations);
2625 sep_cdev.owner = THIS_MODULE;
2626
2627 /* register the driver with the kernel */
2628 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2629
2630 if (ret_val) {
2631 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2632 goto end_function_unregister_devnum;
2633 }
2634
2635 goto end_function;
2636
2637 end_function_unregister_devnum:
2638
2639 /* unregister dev numbers */
2640 unregister_chrdev_region(sep_devno, 1);
2641
2642 end_function:
2643 return ret_val;
2644 }
2645
2646
2647 /*--------------------------------------------------------------
2648 init function
2649 ----------------------------------------------------------------*/
2650 static int __init sep_init(void)
2651 {
2652 int ret_val = 0;
2653 dbg("SEP Driver:-------->Init start\n");
2654 /* FIXME: Probe can occur before we are ready to survive a probe */
2655 ret_val = pci_register_driver(&sep_pci_driver);
2656 if (ret_val) {
2657 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2658 goto end_function_unregister_from_fs;
2659 }
2660 /* register driver to fs */
2661 ret_val = sep_register_driver_to_fs();
2662 if (ret_val)
2663 goto end_function_unregister_pci;
2664 goto end_function;
2665 end_function_unregister_pci:
2666 pci_unregister_driver(&sep_pci_driver);
2667 end_function_unregister_from_fs:
2668 /* unregister from fs */
2669 cdev_del(&sep_cdev);
2670 /* unregister dev numbers */
2671 unregister_chrdev_region(sep_devno, 1);
2672 end_function:
2673 dbg("SEP Driver:<-------- Init end\n");
2674 return ret_val;
2675 }
2676
2677
/*-------------------------------------------------------------
  exit function: unregisters the char device and, if a device was
  probed, frees the shared area and unmaps the register window
  --------------------------------------------------------------*/
static void __exit sep_exit(void)
{
	int size;

	dbg("SEP Driver:--------> Exit start\n");

	/* unregister from fs */
	cdev_del(&sep_cdev);
	/* unregister dev numbers */
	unregister_chrdev_region(sep_devno, 1);
	/* calculate the total size for de-allocation (must mirror the size
	   computed in sep_probe) */
	size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
	    SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
	/* FIXME: We need to do this in the unload for the device */
	/* free shared area */
	if (sep_dev) {
		sep_unmap_and_free_shared_area(sep_dev, size);
		edbg("SEP Driver: free pages SEP SHARED AREA \n");
		iounmap((void *) sep_dev->reg_addr);
		edbg("SEP Driver: iounmap \n");
	}
	edbg("SEP Driver: release_mem_region \n");
	dbg("SEP Driver:<-------- Exit end\n");
}
2705
2706
2707 module_init(sep_init);
2708 module_exit(sep_exit);
2709
2710 MODULE_LICENSE("GPL");
This page took 0.192567 seconds and 5 git commands to generate.