drivers/gpu/drm/exynos/exynos_drm_ipp.c
1 /*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14 #include <linux/kernel.h>
15 #include <linux/platform_device.h>
16 #include <linux/types.h>
17 #include <linux/clk.h>
18 #include <linux/pm_runtime.h>
19
20 #include <drm/drmP.h>
21 #include <drm/exynos_drm.h>
22 #include "exynos_drm_drv.h"
23 #include "exynos_drm_gem.h"
24 #include "exynos_drm_ipp.h"
25 #include "exynos_drm_iommu.h"
26
27 /*
28 * IPP stands for Image Post Processing. It supports image
29 * scaler/rotator and input/output DMA operations using
30 * FIMC, GSC, Rotator and similar hardware blocks.
31 * IPP is an integration driver for hardware with these capabilities.
32 */
33
34 /*
35 * TODO
36 * 1. expand command control id.
37 * 2. remove the send_event id check routine.
38 * 3. integrate property and config.
39 * 4. compare send_event ids if needed.
40 * 5. free the subdrv_remove notifier callback list if needed.
41 * 6. check subdrv_open for multi-open handling.
42 * 7. implement power and sysmmu control in power_on.
43 */
44
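/*
 * Editor's sketch (illustrative only, not compiled): the typical userland
 * flow against this driver, assuming the DRM_IOCTL_EXYNOS_IPP_* ioctls and
 * structures from the exynos_drm.h uapi header. Exact field values depend
 * on the hardware and the use case.
 */
#if 0
	struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
	struct drm_exynos_ipp_property prop = { .cmd = IPP_CMD_M2M };
	struct drm_exynos_ipp_queue_buf qbuf = { .buf_type = IPP_BUF_ENQUEUE };
	struct drm_exynos_ipp_cmd_ctrl ctrl = { .ctrl = IPP_CTRL_PLAY };

	/* 1. count available ipp drivers, then query each one by ipp_id */
	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
	/* 2. describe src/dst formats and geometry, get a prop_id back */
	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
	/* 3. enqueue source and destination GEM buffers */
	qbuf.prop_id = prop.prop_id;
	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
	/* 4. start the operation; completion arrives as a DRM event */
	ctrl.prop_id = prop.prop_id;
	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
#endif
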
45 #define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
46 #define ipp_is_m2m_cmd(c) ((c) == IPP_CMD_M2M)
47
48 /* platform device pointer for ipp device. */
49 static struct platform_device *exynos_drm_ipp_pdev;
50
51 /*
52 * A structure of event.
53 *
54 * @base: base of event.
55 * @event: ipp event.
56 */
57 struct drm_exynos_ipp_send_event {
58 struct drm_pending_event base;
59 struct drm_exynos_ipp_event event;
60 };
61
62 /*
63 * A structure of memory node.
64 *
65 * @list: list head to memory queue information.
66 * @ops_id: id of operations.
67 * @prop_id: id of property.
68 * @buf_id: id of buffer.
69 * @buf_info: gem objects and dma address, size.
70 * @filp: a pointer to drm_file.
71 */
72 struct drm_exynos_ipp_mem_node {
73 struct list_head list;
74 enum drm_exynos_ops_id ops_id;
75 u32 prop_id;
76 u32 buf_id;
77 struct drm_exynos_ipp_buf_info buf_info;
78 struct drm_file *filp;
79 };
80
81 /*
82 * A structure of ipp context.
83 *
84 * @subdrv: prepare initialization using subdrv.
85 * @ipp_lock: lock for synchronization of access to ipp_idr.
86 * @prop_lock: lock for synchronization of access to prop_idr.
87 * @ipp_idr: ipp driver idr.
88 * @prop_idr: property idr.
89 * @event_workq: event work queue.
90 * @cmd_workq: command work queue.
91 */
92 struct ipp_context {
93 struct exynos_drm_subdrv subdrv;
94 struct mutex ipp_lock;
95 struct mutex prop_lock;
96 struct idr ipp_idr;
97 struct idr prop_idr;
98 struct workqueue_struct *event_workq;
99 struct workqueue_struct *cmd_workq;
100 };
101
102 static LIST_HEAD(exynos_drm_ippdrv_list);
103 static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
104 static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
105
106 int exynos_platform_device_ipp_register(void)
107 {
108 struct platform_device *pdev;
109
110 if (exynos_drm_ipp_pdev)
111 return -EEXIST;
112
113 pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
114 if (IS_ERR(pdev))
115 return PTR_ERR(pdev);
116
117 exynos_drm_ipp_pdev = pdev;
118
119 return 0;
120 }
121
122 void exynos_platform_device_ipp_unregister(void)
123 {
124 if (exynos_drm_ipp_pdev) {
125 platform_device_unregister(exynos_drm_ipp_pdev);
126 exynos_drm_ipp_pdev = NULL;
127 }
128 }
129
130 int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
131 {
132 mutex_lock(&exynos_drm_ippdrv_lock);
133 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
134 mutex_unlock(&exynos_drm_ippdrv_lock);
135
136 return 0;
137 }
138
139 int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
140 {
141 mutex_lock(&exynos_drm_ippdrv_lock);
142 list_del(&ippdrv->drv_list);
143 mutex_unlock(&exynos_drm_ippdrv_lock);
144
145 return 0;
146 }
147
148 static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
149 u32 *idp)
150 {
151 int ret;
152
153 /* do the allocation under our mutex lock */
154 mutex_lock(lock);
155 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
156 mutex_unlock(lock);
157 if (ret < 0)
158 return ret;
159
160 *idp = ret;
161 return 0;
162 }
163
164 static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
165 {
166 mutex_lock(lock);
167 idr_remove(id_idr, id);
168 mutex_unlock(lock);
169 }
170
171 static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
172 {
173 void *obj;
174
175 DRM_DEBUG_KMS("id[%d]\n", id);
176
177 mutex_lock(lock);
178
179 /* find object using handle */
180 obj = idr_find(id_idr, id);
181 if (!obj) {
182 DRM_ERROR("failed to find object.\n");
183 mutex_unlock(lock);
184 return ERR_PTR(-ENODEV);
185 }
186
187 mutex_unlock(lock);
188
189 return obj;
190 }
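
/*
 * Editor's note: the three helpers above implement the usual idr lifecycle
 * under a mutex. A minimal sketch of how they compose (illustrative only):
 */
#if 0
	u32 id;
	void *obj;

	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node, &id);
	obj = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, id);	/* c_node */
	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, id);
#endif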
191
192 static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
193 enum drm_exynos_ipp_cmd cmd)
194 {
195 /*
196 * Check the dedicated flag, and for WB/OUTPUT operations,
197 * whether the hardware is still powered on.
198 */
199 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
200 !pm_runtime_suspended(ippdrv->dev)))
201 return true;
202
203 return false;
204 }
205
206 static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
207 struct drm_exynos_ipp_property *property)
208 {
209 struct exynos_drm_ippdrv *ippdrv;
210 u32 ipp_id = property->ipp_id;
211
212 DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);
213
214 if (ipp_id) {
215 /* find ipp driver using idr */
216 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
217 ipp_id);
218 if (IS_ERR(ippdrv)) {
219 DRM_ERROR("not found ipp%d driver.\n", ipp_id);
220 return ippdrv;
221 }
222
223 /*
224 * WB and OUTPUT operations do not support multi-operation,
225 * so mark the driver dedicated in the set property ioctl
226 * and clear the dedicated flag when the driver finishes.
227 */
228 if (ipp_check_dedicated(ippdrv, property->cmd)) {
229 DRM_ERROR("chosen device is already in use.\n");
230 return ERR_PTR(-EBUSY);
231 }
232
233 /*
234 * This is necessary to find the correct device among the
235 * ipp drivers: they have different capabilities, so the
236 * property must be checked.
237 */
238 if (ippdrv->check_property &&
239 ippdrv->check_property(ippdrv->dev, property)) {
240 DRM_ERROR("property not supported.\n");
241 return ERR_PTR(-EINVAL);
242 }
243
244 return ippdrv;
245 } else {
246 /*
247 * The user application did not set ipp_id in this case,
248 * so the ipp subsystem searches the whole driver list
249 * for a suitable driver.
250 */
251 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
252 if (ipp_check_dedicated(ippdrv, property->cmd)) {
253 DRM_DEBUG_KMS("used device.\n");
254 continue;
255 }
256
257 if (ippdrv->check_property &&
258 ippdrv->check_property(ippdrv->dev, property)) {
259 DRM_DEBUG_KMS("not support property.\n");
260 continue;
261 }
262
263 return ippdrv;
264 }
265
266 DRM_ERROR("not support ipp driver operations.\n");
267 }
268
269 return ERR_PTR(-ENODEV);
270 }
271
272 static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
273 {
274 struct exynos_drm_ippdrv *ippdrv;
275 struct drm_exynos_ipp_cmd_node *c_node;
276 int count = 0;
277
278 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
279
280 /*
281 * Search for the ipp driver that owns this prop_id handle.
282 * The ipp subsystem sometimes looks drivers up by prop_id,
283 * e.g. in PAUSE state, queue buf and command control.
284 */
285 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
286 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
287
288 mutex_lock(&ippdrv->cmd_lock);
289 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
290 if (c_node->property.prop_id == prop_id) {
291 mutex_unlock(&ippdrv->cmd_lock);
292 return ippdrv;
293 }
294 }
295 mutex_unlock(&ippdrv->cmd_lock);
296 }
297
298 return ERR_PTR(-ENODEV);
299 }
300
301 int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
302 struct drm_file *file)
303 {
304 struct drm_exynos_file_private *file_priv = file->driver_priv;
305 struct device *dev = file_priv->ipp_dev;
306 struct ipp_context *ctx = get_ipp_context(dev);
307 struct drm_exynos_ipp_prop_list *prop_list = data;
308 struct exynos_drm_ippdrv *ippdrv;
309 int count = 0;
310
311 if (!ctx) {
312 DRM_ERROR("invalid context.\n");
313 return -EINVAL;
314 }
315
316 if (!prop_list) {
317 DRM_ERROR("invalid property parameter.\n");
318 return -EINVAL;
319 }
320
321 DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);
322
323 if (!prop_list->ipp_id) {
324 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
325 count++;
326
327 /*
328 * Report the ippdrv list count to the user application.
329 * In the first step the application reads the driver count,
330 * in the second step it queries capabilities by ipp_id.
331 */
332 prop_list->count = count;
333 } else {
334 /*
335 * Get the ippdrv capabilities by ipp_id.
336 * Some devices do not support the WB or OUTPUT interface,
337 * so the user application uses this ioctl to detect
338 * the correct ipp driver.
339 */
340 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
341 prop_list->ipp_id);
342 if (IS_ERR(ippdrv)) {
343 DRM_ERROR("not found ipp%d driver.\n",
344 prop_list->ipp_id);
345 return PTR_ERR(ippdrv);
346 }
347
348 *prop_list = ippdrv->prop_list;
349 }
350
351 return 0;
352 }
353
354 static void ipp_print_property(struct drm_exynos_ipp_property *property,
355 int idx)
356 {
357 struct drm_exynos_ipp_config *config = &property->config[idx];
358 struct drm_exynos_pos *pos = &config->pos;
359 struct drm_exynos_sz *sz = &config->sz;
360
361 DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
362 property->prop_id, idx ? "dst" : "src", config->fmt);
363
364 DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
365 pos->x, pos->y, pos->w, pos->h,
366 sz->hsize, sz->vsize, config->flip, config->degree);
367 }
368
369 static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
370 {
371 struct exynos_drm_ippdrv *ippdrv;
372 struct drm_exynos_ipp_cmd_node *c_node;
373 u32 prop_id = property->prop_id;
374
375 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
376
377 ippdrv = ipp_find_drv_by_handle(prop_id);
378 if (IS_ERR(ippdrv)) {
379 DRM_ERROR("failed to get ipp driver.\n");
380 return -EINVAL;
381 }
382
383 /*
384 * Find the command node in the ippdrv command list using
385 * prop_id, and store the new property information in the
386 * node that matches.
387 */
388 mutex_lock(&ippdrv->cmd_lock);
389 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
390 if ((c_node->property.prop_id == prop_id) &&
391 (c_node->state == IPP_STATE_STOP)) {
392 mutex_unlock(&ippdrv->cmd_lock);
393 DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
394 property->cmd, (int)ippdrv);
395
396 c_node->property = *property;
397 return 0;
398 }
399 }
400 mutex_unlock(&ippdrv->cmd_lock);
401
402 DRM_ERROR("failed to search property.\n");
403
404 return -EINVAL;
405 }
406
407 static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
408 {
409 struct drm_exynos_ipp_cmd_work *cmd_work;
410
411 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
412 if (!cmd_work)
413 return ERR_PTR(-ENOMEM);
414
415 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
416
417 return cmd_work;
418 }
419
420 static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
421 {
422 struct drm_exynos_ipp_event_work *event_work;
423
424 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
425 if (!event_work)
426 return ERR_PTR(-ENOMEM);
427
428 INIT_WORK(&event_work->work, ipp_sched_event);
429
430 return event_work;
431 }
432
433 int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
434 struct drm_file *file)
435 {
436 struct drm_exynos_file_private *file_priv = file->driver_priv;
437 struct device *dev = file_priv->ipp_dev;
438 struct ipp_context *ctx = get_ipp_context(dev);
439 struct drm_exynos_ipp_property *property = data;
440 struct exynos_drm_ippdrv *ippdrv;
441 struct drm_exynos_ipp_cmd_node *c_node;
442 int ret, i;
443
444 if (!ctx) {
445 DRM_ERROR("invalid context.\n");
446 return -EINVAL;
447 }
448
449 if (!property) {
450 DRM_ERROR("invalid property parameter.\n");
451 return -EINVAL;
452 }
453
454 /*
455 * Log the properties set by the user application,
456 * which may set various properties.
457 */
458 for_each_ipp_ops(i)
459 ipp_print_property(property, i);
460
461 /*
462 * The set property ioctl normally generates a new prop_id,
463 * but a prop_id may already be assigned by an earlier set
464 * property call, e.g. in the PAUSE state. In that case find
465 * the current prop_id and use it instead of allocating one.
466 */
467 if (property->prop_id) {
468 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
469 return ipp_find_and_set_property(property);
470 }
471
472 /* find ipp driver using ipp id */
473 ippdrv = ipp_find_driver(ctx, property);
474 if (IS_ERR(ippdrv)) {
475 DRM_ERROR("failed to get ipp driver.\n");
476 return -EINVAL;
477 }
478
479 /* allocate command node */
480 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
481 if (!c_node)
482 return -ENOMEM;
483
484 /* create property id */
485 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
486 &property->prop_id);
487 if (ret) {
488 DRM_ERROR("failed to create id.\n");
489 goto err_clear;
490 }
491
492 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
493 property->prop_id, property->cmd, (int)ippdrv);
494
495 /* stored property information and ippdrv in private data */
496 c_node->dev = dev;
497 c_node->property = *property;
498 c_node->state = IPP_STATE_IDLE;
499
500 c_node->start_work = ipp_create_cmd_work();
501 if (IS_ERR(c_node->start_work)) {
502 DRM_ERROR("failed to create start work.\n");
503 goto err_remove_id;
504 }
505
506 c_node->stop_work = ipp_create_cmd_work();
507 if (IS_ERR(c_node->stop_work)) {
508 DRM_ERROR("failed to create stop work.\n");
509 goto err_free_start;
510 }
511
512 c_node->event_work = ipp_create_event_work();
513 if (IS_ERR(c_node->event_work)) {
514 DRM_ERROR("failed to create event work.\n");
515 goto err_free_stop;
516 }
517
518 mutex_init(&c_node->lock);
519 mutex_init(&c_node->mem_lock);
520 mutex_init(&c_node->event_lock);
521
522 init_completion(&c_node->start_complete);
523 init_completion(&c_node->stop_complete);
524
525 for_each_ipp_ops(i)
526 INIT_LIST_HEAD(&c_node->mem_list[i]);
527
528 INIT_LIST_HEAD(&c_node->event_list);
529 mutex_lock(&ippdrv->cmd_lock);
530 list_add_tail(&c_node->list, &ippdrv->cmd_list);
531 mutex_unlock(&ippdrv->cmd_lock);
532
533 /* make dedicated state without m2m */
534 if (!ipp_is_m2m_cmd(property->cmd))
535 ippdrv->dedicated = true;
536
537 return 0;
538
539 err_free_stop:
540 kfree(c_node->stop_work);
541 err_free_start:
542 kfree(c_node->start_work);
543 err_remove_id:
544 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
545 err_clear:
546 kfree(c_node);
547 return ret;
548 }
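
/*
 * Editor's sketch (illustrative only): a property a userland client might
 * pass to the set property ioctl above for a simple M2M scale. The format
 * code and sizes are assumptions, not values mandated by the driver.
 */
#if 0
	struct drm_exynos_ipp_property prop = {
		.cmd = IPP_CMD_M2M,
		.ipp_id = 0,	/* 0: let ipp_find_driver() pick a driver */
	};

	prop.config[EXYNOS_DRM_OPS_SRC].fmt = DRM_FORMAT_XRGB8888;
	prop.config[EXYNOS_DRM_OPS_SRC].sz = (struct drm_exynos_sz){ 640, 480 };
	prop.config[EXYNOS_DRM_OPS_SRC].pos =
		(struct drm_exynos_pos){ 0, 0, 640, 480 };
	prop.config[EXYNOS_DRM_OPS_DST].fmt = DRM_FORMAT_XRGB8888;
	prop.config[EXYNOS_DRM_OPS_DST].sz = (struct drm_exynos_sz){ 1280, 720 };
	prop.config[EXYNOS_DRM_OPS_DST].pos =
		(struct drm_exynos_pos){ 0, 0, 1280, 720 };

	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
	/* prop.prop_id now holds the id created by ipp_create_id() */
#endif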
549
550 static void ipp_clean_cmd_node(struct ipp_context *ctx,
551 struct drm_exynos_ipp_cmd_node *c_node)
552 {
553 /* delete list */
554 list_del(&c_node->list);
555
556 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
557 c_node->property.prop_id);
558
559 /* destroy mutex */
560 mutex_destroy(&c_node->lock);
561 mutex_destroy(&c_node->mem_lock);
562 mutex_destroy(&c_node->event_lock);
563
564 /* free command node */
565 kfree(c_node->start_work);
566 kfree(c_node->stop_work);
567 kfree(c_node->event_work);
568 kfree(c_node);
569 }
570
571 static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
572 {
573 switch (c_node->property.cmd) {
574 case IPP_CMD_WB:
575 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
576 case IPP_CMD_OUTPUT:
577 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
578 case IPP_CMD_M2M:
579 default:
580 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
581 !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
582 }
583 }
584
585 static struct drm_exynos_ipp_mem_node
586 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
587 struct drm_exynos_ipp_queue_buf *qbuf)
588 {
589 struct drm_exynos_ipp_mem_node *m_node;
590 struct list_head *head;
591 int count = 0;
592
593 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
594
595 /* source/destination memory list */
596 head = &c_node->mem_list[qbuf->ops_id];
597
598 /* find memory node from memory list */
599 list_for_each_entry(m_node, head, list) {
600 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
601
602 /* compare buffer id */
603 if (m_node->buf_id == qbuf->buf_id)
604 return m_node;
605 }
606
607 return NULL;
608 }
609
610 static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
611 struct drm_exynos_ipp_cmd_node *c_node,
612 struct drm_exynos_ipp_mem_node *m_node)
613 {
614 struct exynos_drm_ipp_ops *ops = NULL;
615 int ret = 0;
616
617 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
618
619 if (!m_node) {
620 DRM_ERROR("invalid queue node.\n");
621 return -EFAULT;
622 }
623
624 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
625
626 /* get operations callback */
627 ops = ippdrv->ops[m_node->ops_id];
628 if (!ops) {
629 DRM_ERROR("not support ops.\n");
630 return -EFAULT;
631 }
632
633 /* set address and enable irq */
634 if (ops->set_addr) {
635 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
636 m_node->buf_id, IPP_BUF_ENQUEUE);
637 if (ret) {
638 DRM_ERROR("failed to set addr.\n");
639 return ret;
640 }
641 }
642
643 return ret;
644 }
645
646 static struct drm_exynos_ipp_mem_node
647 *ipp_get_mem_node(struct drm_device *drm_dev,
648 struct drm_file *file,
649 struct drm_exynos_ipp_cmd_node *c_node,
650 struct drm_exynos_ipp_queue_buf *qbuf)
651 {
652 struct drm_exynos_ipp_mem_node *m_node;
653 struct drm_exynos_ipp_buf_info *buf_info;
654 int i;
655
656 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
657 if (!m_node)
658 return ERR_PTR(-ENOMEM);
659
660 buf_info = &m_node->buf_info;
661
662 /* operations, buffer id */
663 m_node->ops_id = qbuf->ops_id;
664 m_node->prop_id = qbuf->prop_id;
665 m_node->buf_id = qbuf->buf_id;
666
667 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
668 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
669
670 for_each_ipp_planar(i) {
671 DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
672
673 /* get dma address by handle */
674 if (qbuf->handle[i]) {
675 dma_addr_t *addr;
676
677 addr = exynos_drm_gem_get_dma_addr(drm_dev,
678 qbuf->handle[i], file);
679 if (IS_ERR(addr)) {
680 DRM_ERROR("failed to get addr.\n");
681 goto err_clear;
682 }
683
684 buf_info->handles[i] = qbuf->handle[i];
685 buf_info->base[i] = *addr;
686 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
687 buf_info->base[i], buf_info->handles[i]);
688 }
689 }
690
691 m_node->filp = file;
692 mutex_lock(&c_node->mem_lock);
693 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
694 mutex_unlock(&c_node->mem_lock);
695
696 return m_node;
697
698 err_clear:
699 kfree(m_node);
700 return ERR_PTR(-EFAULT);
701 }
702
703 static int ipp_put_mem_node(struct drm_device *drm_dev,
704 struct drm_exynos_ipp_cmd_node *c_node,
705 struct drm_exynos_ipp_mem_node *m_node)
706 {
707 int i;
708
709 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
710
711 if (!m_node) {
712 DRM_ERROR("invalid dequeue node.\n");
713 return -EFAULT;
714 }
715
716 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
717
718 /* put gem buffer */
719 for_each_ipp_planar(i) {
720 unsigned long handle = m_node->buf_info.handles[i];
721 if (handle)
722 exynos_drm_gem_put_dma_addr(drm_dev, handle,
723 m_node->filp);
724 }
725
726 /* delete list in queue */
727 list_del(&m_node->list);
728 kfree(m_node);
729
730 return 0;
731 }
732
733 static void ipp_free_event(struct drm_pending_event *event)
734 {
735 kfree(event);
736 }
737
738 static int ipp_get_event(struct drm_device *drm_dev,
739 struct drm_file *file,
740 struct drm_exynos_ipp_cmd_node *c_node,
741 struct drm_exynos_ipp_queue_buf *qbuf)
742 {
743 struct drm_exynos_ipp_send_event *e;
744 unsigned long flags;
745
746 DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
747
748 e = kzalloc(sizeof(*e), GFP_KERNEL);
749 if (!e) {
750 spin_lock_irqsave(&drm_dev->event_lock, flags);
751 file->event_space += sizeof(e->event);
752 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
753 return -ENOMEM;
754 }
755
756 /* make event */
757 e->event.base.type = DRM_EXYNOS_IPP_EVENT;
758 e->event.base.length = sizeof(e->event);
759 e->event.user_data = qbuf->user_data;
760 e->event.prop_id = qbuf->prop_id;
761 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
762 e->base.event = &e->event.base;
763 e->base.file_priv = file;
764 e->base.destroy = ipp_free_event;
765 mutex_lock(&c_node->event_lock);
766 list_add_tail(&e->base.link, &c_node->event_list);
767 mutex_unlock(&c_node->event_lock);
768
769 return 0;
770 }
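
/*
 * Editor's sketch (illustrative only): how a userland client would consume
 * the event queued above, by read()ing the DRM fd and matching the
 * DRM_EXYNOS_IPP_EVENT type from the exynos uapi.
 */
#if 0
	char buffer[1024];
	int len = read(fd, buffer, sizeof(buffer));
	struct drm_event *e;

	for (int i = 0; i < len; i += e->length) {
		e = (struct drm_event *)&buffer[i];
		if (e->type == DRM_EXYNOS_IPP_EVENT) {
			struct drm_exynos_ipp_event *ipp_ev =
				(struct drm_exynos_ipp_event *)e;
			/* ipp_ev->prop_id and ipp_ev->buf_id[] identify
			 * which queued destination buffer completed. */
		}
	}
#endif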
771
772 static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
773 struct drm_exynos_ipp_queue_buf *qbuf)
774 {
775 struct drm_exynos_ipp_send_event *e, *te;
776 int count = 0;
777
778 mutex_lock(&c_node->event_lock);
779 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
780 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
781
782 /*
783 * qbuf == NULL means delete all events: stop operations
784 * want to flush the whole event list. Otherwise delete
785 * only the event with the matching buf_id.
786 */
787 if (!qbuf) {
788 /* delete list */
789 list_del(&e->base.link);
790 kfree(e);
791 }
792
793 /* compare buffer id */
794 if (qbuf && (qbuf->buf_id ==
795 e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
796 /* delete list */
797 list_del(&e->base.link);
798 kfree(e);
799 goto out_unlock;
800 }
801 }
802
803 out_unlock:
804 mutex_unlock(&c_node->event_lock);
805 return;
806 }
807
808 static void ipp_handle_cmd_work(struct device *dev,
809 struct exynos_drm_ippdrv *ippdrv,
810 struct drm_exynos_ipp_cmd_work *cmd_work,
811 struct drm_exynos_ipp_cmd_node *c_node)
812 {
813 struct ipp_context *ctx = get_ipp_context(dev);
814
815 cmd_work->ippdrv = ippdrv;
816 cmd_work->c_node = c_node;
817 queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
818 }
819
820 static int ipp_queue_buf_with_run(struct device *dev,
821 struct drm_exynos_ipp_cmd_node *c_node,
822 struct drm_exynos_ipp_mem_node *m_node,
823 struct drm_exynos_ipp_queue_buf *qbuf)
824 {
825 struct exynos_drm_ippdrv *ippdrv;
826 struct drm_exynos_ipp_property *property;
827 struct exynos_drm_ipp_ops *ops;
828 int ret;
829
830 ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
831 if (IS_ERR(ippdrv)) {
832 DRM_ERROR("failed to get ipp driver.\n");
833 return -EFAULT;
834 }
835
836 ops = ippdrv->ops[qbuf->ops_id];
837 if (!ops) {
838 DRM_ERROR("failed to get ops.\n");
839 return -EFAULT;
840 }
841
842 property = &c_node->property;
843
844 if (c_node->state != IPP_STATE_START) {
845 DRM_DEBUG_KMS("bypass for invalid state.\n");
846 return 0;
847 }
848
849 mutex_lock(&c_node->mem_lock);
850 if (!ipp_check_mem_list(c_node)) {
851 mutex_unlock(&c_node->mem_lock);
852 DRM_DEBUG_KMS("empty memory.\n");
853 return 0;
854 }
855
856 /*
857 * If the destination buffer is set and the clock enabled,
858 * m2m operations need to be started from queue_buf.
859 */
860 if (ipp_is_m2m_cmd(property->cmd)) {
861 struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
862
863 cmd_work->ctrl = IPP_CTRL_PLAY;
864 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
865 } else {
866 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
867 if (ret) {
868 mutex_unlock(&c_node->mem_lock);
869 DRM_ERROR("failed to set m node.\n");
870 return ret;
871 }
872 }
873 mutex_unlock(&c_node->mem_lock);
874
875 return 0;
876 }
877
878 static void ipp_clean_queue_buf(struct drm_device *drm_dev,
879 struct drm_exynos_ipp_cmd_node *c_node,
880 struct drm_exynos_ipp_queue_buf *qbuf)
881 {
882 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
883
884 /* delete list */
885 mutex_lock(&c_node->mem_lock);
886 list_for_each_entry_safe(m_node, tm_node,
887 &c_node->mem_list[qbuf->ops_id], list) {
888 if (m_node->buf_id == qbuf->buf_id &&
889 m_node->ops_id == qbuf->ops_id)
890 ipp_put_mem_node(drm_dev, c_node, m_node);
891 }
892 mutex_unlock(&c_node->mem_lock);
893 }
894
895 int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
896 struct drm_file *file)
897 {
898 struct drm_exynos_file_private *file_priv = file->driver_priv;
899 struct device *dev = file_priv->ipp_dev;
900 struct ipp_context *ctx = get_ipp_context(dev);
901 struct drm_exynos_ipp_queue_buf *qbuf = data;
902 struct drm_exynos_ipp_cmd_node *c_node;
903 struct drm_exynos_ipp_mem_node *m_node;
904 int ret;
905
906 if (!qbuf) {
907 DRM_ERROR("invalid buf parameter.\n");
908 return -EINVAL;
909 }
910
911 if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
912 DRM_ERROR("invalid ops parameter.\n");
913 return -EINVAL;
914 }
915
916 DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
917 qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
918 qbuf->buf_id, qbuf->buf_type);
919
920 /* find command node */
921 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
922 qbuf->prop_id);
923 if (IS_ERR(c_node)) {
924 DRM_ERROR("failed to get command node.\n");
925 return PTR_ERR(c_node);
926 }
927
928 /* buffer control */
929 switch (qbuf->buf_type) {
930 case IPP_BUF_ENQUEUE:
931 /* get memory node */
932 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
933 if (IS_ERR(m_node)) {
934 DRM_ERROR("failed to get m_node.\n");
935 return PTR_ERR(m_node);
936 }
937
938 /*
939 * First get an event for the destination buffer, then,
940 * in the M2M case, run with the destination buffer
941 * if needed.
942 */
943 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
944 /* get event for destination buffer */
945 ret = ipp_get_event(drm_dev, file, c_node, qbuf);
946 if (ret) {
947 DRM_ERROR("failed to get event.\n");
948 goto err_clean_node;
949 }
950
951 /*
952 * In the M2M case run the play control for streaming;
953 * in other cases just set the address and wait.
954 */
955 ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
956 if (ret) {
957 DRM_ERROR("failed to run command.\n");
958 goto err_clean_node;
959 }
960 }
961 break;
962 case IPP_BUF_DEQUEUE:
963 mutex_lock(&c_node->lock);
964
965 /* put event for destination buffer */
966 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
967 ipp_put_event(c_node, qbuf);
968
969 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
970
971 mutex_unlock(&c_node->lock);
972 break;
973 default:
974 DRM_ERROR("invalid buffer control.\n");
975 return -EINVAL;
976 }
977
978 return 0;
979
980 err_clean_node:
981 DRM_ERROR("clean memory nodes.\n");
982
983 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
984 return ret;
985 }
986
987 static bool exynos_drm_ipp_check_valid(struct device *dev,
988 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
989 {
990 if (ctrl != IPP_CTRL_PLAY) {
991 if (pm_runtime_suspended(dev)) {
992 DRM_ERROR("pm:runtime_suspended.\n");
993 goto err_status;
994 }
995 }
996
997 switch (ctrl) {
998 case IPP_CTRL_PLAY:
999 if (state != IPP_STATE_IDLE)
1000 goto err_status;
1001 break;
1002 case IPP_CTRL_STOP:
1003 if (state == IPP_STATE_STOP)
1004 goto err_status;
1005 break;
1006 case IPP_CTRL_PAUSE:
1007 if (state != IPP_STATE_START)
1008 goto err_status;
1009 break;
1010 case IPP_CTRL_RESUME:
1011 if (state != IPP_STATE_STOP)
1012 goto err_status;
1013 break;
1014 default:
1015 DRM_ERROR("invalid state.\n");
1016 goto err_status;
1017 }
1018
1019 return true;
1020
1021 err_status:
1022 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
1023 return false;
1024 }
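
/*
 * Editor's summary of the state machine enforced above and in
 * exynos_drm_ipp_cmd_ctrl() below:
 *
 *   IPP_CTRL_PLAY:   allowed from IDLE,      moves the node to START
 *   IPP_CTRL_STOP:   allowed unless in STOP, moves the node to STOP
 *   IPP_CTRL_PAUSE:  allowed from START,     moves the node to STOP
 *   IPP_CTRL_RESUME: allowed from STOP,      moves the node to START
 *
 * For everything but PLAY the device must not be runtime-suspended.
 */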
1025
1026 int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1027 struct drm_file *file)
1028 {
1029 struct drm_exynos_file_private *file_priv = file->driver_priv;
1030 struct exynos_drm_ippdrv *ippdrv = NULL;
1031 struct device *dev = file_priv->ipp_dev;
1032 struct ipp_context *ctx = get_ipp_context(dev);
1033 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1034 struct drm_exynos_ipp_cmd_work *cmd_work;
1035 struct drm_exynos_ipp_cmd_node *c_node;
1036
1037 if (!ctx) {
1038 DRM_ERROR("invalid context.\n");
1039 return -EINVAL;
1040 }
1041
1042 if (!cmd_ctrl) {
1043 DRM_ERROR("invalid control parameter.\n");
1044 return -EINVAL;
1045 }
1046
1047 DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
1048 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1049
1050 ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
1051 if (IS_ERR(ippdrv)) {
1052 DRM_ERROR("failed to get ipp driver.\n");
1053 return PTR_ERR(ippdrv);
1054 }
1055
1056 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1057 cmd_ctrl->prop_id);
1058 if (IS_ERR(c_node)) {
1059 DRM_ERROR("invalid command node list.\n");
1060 return PTR_ERR(c_node);
1061 }
1062
1063 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
1064 c_node->state)) {
1065 DRM_ERROR("invalid state.\n");
1066 return -EINVAL;
1067 }
1068
1069 switch (cmd_ctrl->ctrl) {
1070 case IPP_CTRL_PLAY:
1071 if (pm_runtime_suspended(ippdrv->dev))
1072 pm_runtime_get_sync(ippdrv->dev);
1073
1074 c_node->state = IPP_STATE_START;
1075
1076 cmd_work = c_node->start_work;
1077 cmd_work->ctrl = cmd_ctrl->ctrl;
1078 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1079 break;
1080 case IPP_CTRL_STOP:
1081 cmd_work = c_node->stop_work;
1082 cmd_work->ctrl = cmd_ctrl->ctrl;
1083 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1084
1085 if (!wait_for_completion_timeout(&c_node->stop_complete,
1086 msecs_to_jiffies(300))) {
1087 DRM_ERROR("timeout stop:prop_id[%d]\n",
1088 c_node->property.prop_id);
1089 }
1090
1091 c_node->state = IPP_STATE_STOP;
1092 ippdrv->dedicated = false;
1093 mutex_lock(&ippdrv->cmd_lock);
1094 ipp_clean_cmd_node(ctx, c_node);
1095
1096 if (list_empty(&ippdrv->cmd_list))
1097 pm_runtime_put_sync(ippdrv->dev);
1098 mutex_unlock(&ippdrv->cmd_lock);
1099 break;
1100 case IPP_CTRL_PAUSE:
1101 cmd_work = c_node->stop_work;
1102 cmd_work->ctrl = cmd_ctrl->ctrl;
1103 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1104
1105 if (!wait_for_completion_timeout(&c_node->stop_complete,
1106 msecs_to_jiffies(200))) {
1107 DRM_ERROR("timeout stop:prop_id[%d]\n",
1108 c_node->property.prop_id);
1109 }
1110
1111 c_node->state = IPP_STATE_STOP;
1112 break;
1113 case IPP_CTRL_RESUME:
1114 c_node->state = IPP_STATE_START;
1115 cmd_work = c_node->start_work;
1116 cmd_work->ctrl = cmd_ctrl->ctrl;
1117 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1118 break;
1119 default:
1120 DRM_ERROR("could not support this state currently.\n");
1121 return -EINVAL;
1122 }
1123
1124 DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
1125 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1126
1127 return 0;
1128 }
1129
1130 int exynos_drm_ippnb_register(struct notifier_block *nb)
1131 {
1132 return blocking_notifier_chain_register(
1133 &exynos_drm_ippnb_list, nb);
1134 }
1135
1136 int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1137 {
1138 return blocking_notifier_chain_unregister(
1139 &exynos_drm_ippnb_list, nb);
1140 }
1141
1142 int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1143 {
1144 return blocking_notifier_call_chain(
1145 &exynos_drm_ippnb_list, val, v);
1146 }
1147
1148 static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1149 struct drm_exynos_ipp_property *property)
1150 {
1151 struct exynos_drm_ipp_ops *ops = NULL;
1152 bool swap = false;
1153 int ret, i;
1154
1155 if (!property) {
1156 DRM_ERROR("invalid property parameter.\n");
1157 return -EINVAL;
1158 }
1159
1160 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1161
1162 /* reset h/w block */
1163 if (ippdrv->reset &&
1164 ippdrv->reset(ippdrv->dev)) {
1165 DRM_ERROR("failed to reset.\n");
1166 return -EINVAL;
1167 }
1168
1169 /* set source and destination operations */
1170 for_each_ipp_ops(i) {
1171 struct drm_exynos_ipp_config *config =
1172 &property->config[i];
1173
1174 ops = ippdrv->ops[i];
1175 if (!ops || !config) {
1176 DRM_ERROR("not support ops and config.\n");
1177 return -EINVAL;
1178 }
1179
1180 /* set format */
1181 if (ops->set_fmt) {
1182 ret = ops->set_fmt(ippdrv->dev, config->fmt);
1183 if (ret) {
1184 DRM_ERROR("not support format.\n");
1185 return ret;
1186 }
1187 }
1188
1189 /* set transform for rotation, flip */
1190 if (ops->set_transf) {
1191 ret = ops->set_transf(ippdrv->dev, config->degree,
1192 config->flip, &swap);
1193 if (ret) {
1194 DRM_ERROR("not support tranf.\n");
1195 return -EINVAL;
1196 }
1197 }
1198
1199 /* set size */
1200 if (ops->set_size) {
1201 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1202 &config->sz);
1203 if (ret) {
1204 DRM_ERROR("not support size.\n");
1205 return ret;
1206 }
1207 }
1208 }
1209
1210 return 0;
1211 }
1212
1213 static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1214 struct drm_exynos_ipp_cmd_node *c_node)
1215 {
1216 struct drm_exynos_ipp_mem_node *m_node;
1217 struct drm_exynos_ipp_property *property = &c_node->property;
1218 struct list_head *head;
1219 int ret, i;
1220
1221 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1222
1223 /* store command info in ippdrv */
1224 ippdrv->c_node = c_node;
1225
1226 mutex_lock(&c_node->mem_lock);
1227 if (!ipp_check_mem_list(c_node)) {
1228 DRM_DEBUG_KMS("empty memory.\n");
1229 ret = -ENOMEM;
1230 goto err_unlock;
1231 }
1232
1233 /* set current property in ippdrv */
1234 ret = ipp_set_property(ippdrv, property);
1235 if (ret) {
1236 DRM_ERROR("failed to set property.\n");
1237 ippdrv->c_node = NULL;
1238 goto err_unlock;
1239 }
1240
1241 /* check command */
1242 switch (property->cmd) {
1243 case IPP_CMD_M2M:
1244 for_each_ipp_ops(i) {
1245 /* source/destination memory list */
1246 head = &c_node->mem_list[i];
1247
1248 m_node = list_first_entry(head,
1249 struct drm_exynos_ipp_mem_node, list);
1250
1251 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
1252
1253 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1254 if (ret) {
1255 DRM_ERROR("failed to set m node.\n");
1256 goto err_unlock;
1257 }
1258 }
1259 break;
1260 case IPP_CMD_WB:
1261 /* destination memory list */
1262 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1263
1264 list_for_each_entry(m_node, head, list) {
1265 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1266 if (ret) {
1267 DRM_ERROR("failed to set m node.\n");
1268 goto err_unlock;
1269 }
1270 }
1271 break;
1272 case IPP_CMD_OUTPUT:
1273 /* source memory list */
1274 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1275
1276 list_for_each_entry(m_node, head, list) {
1277 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1278 if (ret) {
1279 DRM_ERROR("failed to set m node.\n");
1280 goto err_unlock;
1281 }
1282 }
1283 break;
1284 default:
1285 DRM_ERROR("invalid operations.\n");
1286 ret = -EINVAL;
1287 goto err_unlock;
1288 }
1289 mutex_unlock(&c_node->mem_lock);
1290
1291 DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);
1292
1293 /* start operations */
1294 if (ippdrv->start) {
1295 ret = ippdrv->start(ippdrv->dev, property->cmd);
1296 if (ret) {
1297 DRM_ERROR("failed to start ops.\n");
1298 ippdrv->c_node = NULL;
1299 return ret;
1300 }
1301 }
1302
1303 return 0;
1304
1305 err_unlock:
1306 mutex_unlock(&c_node->mem_lock);
1307 ippdrv->c_node = NULL;
1308 return ret;
1309 }
1310
1311 static int ipp_stop_property(struct drm_device *drm_dev,
1312 struct exynos_drm_ippdrv *ippdrv,
1313 struct drm_exynos_ipp_cmd_node *c_node)
1314 {
1315 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1316 struct drm_exynos_ipp_property *property = &c_node->property;
1317 struct list_head *head;
1318 int ret = 0, i;
1319
1320 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
1321
1322 /* put event */
1323 ipp_put_event(c_node, NULL);
1324
1325 mutex_lock(&c_node->mem_lock);
1326
1327 /* check command */
1328 switch (property->cmd) {
1329 case IPP_CMD_M2M:
1330 for_each_ipp_ops(i) {
1331 /* source/destination memory list */
1332 head = &c_node->mem_list[i];
1333
1334 list_for_each_entry_safe(m_node, tm_node,
1335 head, list) {
1336 ret = ipp_put_mem_node(drm_dev, c_node,
1337 m_node);
1338 if (ret) {
1339 DRM_ERROR("failed to put m_node.\n");
1340 goto err_clear;
1341 }
1342 }
1343 }
1344 break;
1345 case IPP_CMD_WB:
1346 /* destination memory list */
1347 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1348
1349 list_for_each_entry_safe(m_node, tm_node, head, list) {
1350 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1351 if (ret) {
1352 DRM_ERROR("failed to put m_node.\n");
1353 goto err_clear;
1354 }
1355 }
1356 break;
1357 case IPP_CMD_OUTPUT:
1358 /* source memory list */
1359 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1360
1361 list_for_each_entry_safe(m_node, tm_node, head, list) {
1362 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1363 if (ret) {
1364 DRM_ERROR("failed to put m_node.\n");
1365 goto err_clear;
1366 }
1367 }
1368 break;
1369 default:
1370 DRM_ERROR("invalid operations.\n");
1371 ret = -EINVAL;
1372 goto err_clear;
1373 }
1374
1375 err_clear:
1376 mutex_unlock(&c_node->mem_lock);
1377
1378 /* stop operations */
1379 if (ippdrv->stop)
1380 ippdrv->stop(ippdrv->dev, property->cmd);
1381
1382 return ret;
1383 }
1384
1385 void ipp_sched_cmd(struct work_struct *work)
1386 {
1387 struct drm_exynos_ipp_cmd_work *cmd_work =
1388 (struct drm_exynos_ipp_cmd_work *)work;
1389 struct exynos_drm_ippdrv *ippdrv;
1390 struct drm_exynos_ipp_cmd_node *c_node;
1391 struct drm_exynos_ipp_property *property;
1392 int ret;
1393
1394 ippdrv = cmd_work->ippdrv;
1395 if (!ippdrv) {
1396 DRM_ERROR("invalid ippdrv list.\n");
1397 return;
1398 }
1399
1400 c_node = cmd_work->c_node;
1401 if (!c_node) {
1402 DRM_ERROR("invalid command node list.\n");
1403 return;
1404 }
1405
1406 mutex_lock(&c_node->lock);
1407
1408 property = &c_node->property;
1409
1410 switch (cmd_work->ctrl) {
1411 case IPP_CTRL_PLAY:
1412 case IPP_CTRL_RESUME:
1413 ret = ipp_start_property(ippdrv, c_node);
1414 if (ret) {
1415 DRM_ERROR("failed to start property:prop_id[%d]\n",
1416 c_node->property.prop_id);
1417 goto err_unlock;
1418 }
1419
1420 /*
1421 * The M2M case waits for completion of the transfer,
1422 * because M2M performs a single unit operation over
1423 * multiple queued buffers and must wait until the
1424 * data transfer finishes.
1425 */
1426 if (ipp_is_m2m_cmd(property->cmd)) {
1427 if (!wait_for_completion_timeout
1428 (&c_node->start_complete, msecs_to_jiffies(200))) {
1429 DRM_ERROR("timeout event:prop_id[%d]\n",
1430 c_node->property.prop_id);
1431 goto err_unlock;
1432 }
1433 }
1434 break;
1435 case IPP_CTRL_STOP:
1436 case IPP_CTRL_PAUSE:
1437 ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
1438 c_node);
1439 if (ret) {
1440 DRM_ERROR("failed to stop property.\n");
1441 goto err_unlock;
1442 }
1443
1444 complete(&c_node->stop_complete);
1445 break;
1446 default:
1447 DRM_ERROR("unknown control type\n");
1448 break;
1449 }
1450
1451 DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);
1452
1453 err_unlock:
1454 mutex_unlock(&c_node->lock);
1455 }
1456
1457 static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1458 struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
1459 {
1460 struct drm_device *drm_dev = ippdrv->drm_dev;
1461 struct drm_exynos_ipp_property *property = &c_node->property;
1462 struct drm_exynos_ipp_mem_node *m_node;
1463 struct drm_exynos_ipp_queue_buf qbuf;
1464 struct drm_exynos_ipp_send_event *e;
1465 struct list_head *head;
1466 struct timeval now;
1467 unsigned long flags;
1468 u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
1469 int ret, i;
1470
1471 for_each_ipp_ops(i)
1472 DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);
1473
1474 if (!drm_dev) {
1475 DRM_ERROR("failed to get drm_dev.\n");
1476 return -EINVAL;
1477 }
1478
1479 if (!property) {
1480 DRM_ERROR("failed to get property.\n");
1481 return -EINVAL;
1482 }
1483
1484 mutex_lock(&c_node->event_lock);
1485 if (list_empty(&c_node->event_list)) {
1486 DRM_DEBUG_KMS("event list is empty.\n");
1487 ret = 0;
1488 goto err_event_unlock;
1489 }
1490
1491 mutex_lock(&c_node->mem_lock);
1492 if (!ipp_check_mem_list(c_node)) {
1493 DRM_DEBUG_KMS("empty memory.\n");
1494 ret = 0;
1495 goto err_mem_unlock;
1496 }
1497
1498 /* check command */
1499 switch (property->cmd) {
1500 case IPP_CMD_M2M:
1501 for_each_ipp_ops(i) {
1502 /* source/destination memory list */
1503 head = &c_node->mem_list[i];
1504
1505 m_node = list_first_entry(head,
1506 struct drm_exynos_ipp_mem_node, list);
1507
1508 tbuf_id[i] = m_node->buf_id;
1509 DRM_DEBUG_KMS("%s buf_id[%d]\n",
1510 i ? "dst" : "src", tbuf_id[i]);
1511
1512 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1513 if (ret)
1514 DRM_ERROR("failed to put m_node.\n");
1515 }
1516 break;
1517 case IPP_CMD_WB:
1518 /* clear buf for finding */
1519 memset(&qbuf, 0x0, sizeof(qbuf));
1520 qbuf.ops_id = EXYNOS_DRM_OPS_DST;
1521 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
1522
1523 /* get memory node entry */
1524 m_node = ipp_find_mem_node(c_node, &qbuf);
1525 if (!m_node) {
1526 DRM_ERROR("empty memory node.\n");
1527 ret = -ENOMEM;
1528 goto err_mem_unlock;
1529 }
1530
1531 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
1532
1533 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1534 if (ret)
1535 DRM_ERROR("failed to put m_node.\n");
1536 break;
1537 case IPP_CMD_OUTPUT:
1538 /* source memory list */
1539 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1540
1541 m_node = list_first_entry(head,
1542 struct drm_exynos_ipp_mem_node, list);
1543
1544 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1545
1546 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1547 if (ret)
1548 DRM_ERROR("failed to put m_node.\n");
1549 break;
1550 default:
1551 DRM_ERROR("invalid operations.\n");
1552 ret = -EINVAL;
1553 goto err_mem_unlock;
1554 }
1555 mutex_unlock(&c_node->mem_lock);
1556
1557 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1558 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
1559 tbuf_id[1], buf_id[1], property->prop_id);
1560
1561 /*
1562 * The command node keeps an event list for destination buffers.
1563 * When a destination buffer is enqueued to the mem list, an
1564 * event is created and linked to the tail of the event list,
1565 * so the first event matches the first enqueued buffer.
1566 */
1567 e = list_first_entry(&c_node->event_list,
1568 struct drm_exynos_ipp_send_event, base.link);
1569
1570 do_gettimeofday(&now);
1571 DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
1572 e->event.tv_sec = now.tv_sec;
1573 e->event.tv_usec = now.tv_usec;
1574 e->event.prop_id = property->prop_id;
1575
1576 /* set buffer id about source destination */
1577 for_each_ipp_ops(i)
1578 e->event.buf_id[i] = tbuf_id[i];
1579
1580 spin_lock_irqsave(&drm_dev->event_lock, flags);
1581 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1582 wake_up_interruptible(&e->base.file_priv->event_wait);
1583 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
1584 mutex_unlock(&c_node->event_lock);
1585
1586 DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
1587 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
1588
1589 return 0;
1590
1591 err_mem_unlock:
1592 mutex_unlock(&c_node->mem_lock);
1593 err_event_unlock:
1594 mutex_unlock(&c_node->event_lock);
1595 return ret;
1596 }
1597
1598 void ipp_sched_event(struct work_struct *work)
1599 {
1600 struct drm_exynos_ipp_event_work *event_work =
1601 (struct drm_exynos_ipp_event_work *)work;
1602 struct exynos_drm_ippdrv *ippdrv;
1603 struct drm_exynos_ipp_cmd_node *c_node;
1604 int ret;
1605
1606 if (!event_work) {
1607 DRM_ERROR("failed to get event_work.\n");
1608 return;
1609 }
1610
1611 DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1612
1613 ippdrv = event_work->ippdrv;
1614 if (!ippdrv) {
1615 DRM_ERROR("failed to get ipp driver.\n");
1616 return;
1617 }
1618
1619 c_node = ippdrv->c_node;
1620 if (!c_node) {
1621 DRM_ERROR("failed to get command node.\n");
1622 return;
1623 }
1624
1625 /*
1626 * IPP synchronizes the command thread and the event thread.
1627 * If userland closes IPP immediately, synchronize with the
1628 * command thread by completing the event instead of
1629 * carrying out the operations.
1630 */
1631 if (c_node->state != IPP_STATE_START) {
1632 DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
1633 c_node->state, c_node->property.prop_id);
1634 goto err_completion;
1635 }
1636
1637 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1638 if (ret) {
1639 DRM_ERROR("failed to send event.\n");
1640 goto err_completion;
1641 }
1642
1643 err_completion:
1644 if (ipp_is_m2m_cmd(c_node->property.cmd))
1645 complete(&c_node->start_complete);
1646 }
1647
1648 static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1649 {
1650 struct ipp_context *ctx = get_ipp_context(dev);
1651 struct exynos_drm_ippdrv *ippdrv;
1652 int ret, count = 0;
1653
1654 /* get ipp driver entry */
1655 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1656 u32 ipp_id;
1657
1658 ippdrv->drm_dev = drm_dev;
1659
1660 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1661 &ipp_id);
1662 if (ret || ipp_id == 0) {
1663 DRM_ERROR("failed to create id.\n");
1664 goto err;
1665 }
1666
1667 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
1668 count++, (int)ippdrv, ipp_id);
1669
1670 ippdrv->prop_list.ipp_id = ipp_id;
1671
1672 /* store parent device for node */
1673 ippdrv->parent_dev = dev;
1674
1675 /* store event work queue and handler */
1676 ippdrv->event_workq = ctx->event_workq;
1677 ippdrv->sched_event = ipp_sched_event;
1678 INIT_LIST_HEAD(&ippdrv->cmd_list);
1679 mutex_init(&ippdrv->cmd_lock);
1680
1681 if (is_drm_iommu_supported(drm_dev)) {
1682 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
1683 if (ret) {
1684 DRM_ERROR("failed to activate iommu\n");
1685 goto err;
1686 }
1687 }
1688 }
1689
1690 return 0;
1691
1692 err:
1693 /* get ipp driver entry */
1694 list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
1695 drv_list) {
1696 if (is_drm_iommu_supported(drm_dev))
1697 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1698
1699 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1700 ippdrv->prop_list.ipp_id);
1701 }
1702
1703 return ret;
1704 }
1705
1706 static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1707 {
1708 struct exynos_drm_ippdrv *ippdrv;
1709 struct ipp_context *ctx = get_ipp_context(dev);
1710
1711 /* get ipp driver entry */
1712 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1713 if (is_drm_iommu_supported(drm_dev))
1714 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1715
1716 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
1717 ippdrv->prop_list.ipp_id);
1718
1719 ippdrv->drm_dev = NULL;
1720 exynos_drm_ippdrv_unregister(ippdrv);
1721 }
1722 }
1723
1724 static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1725 struct drm_file *file)
1726 {
1727 struct drm_exynos_file_private *file_priv = file->driver_priv;
1728
1729 file_priv->ipp_dev = dev;
1730
1731 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
1732
1733 return 0;
1734 }
1735
1736 static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1737 struct drm_file *file)
1738 {
1739 struct drm_exynos_file_private *file_priv = file->driver_priv;
1740 struct exynos_drm_ippdrv *ippdrv = NULL;
1741 struct ipp_context *ctx = get_ipp_context(dev);
1742 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1743 int count = 0;
1744
1745 DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
1746
1747 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1748 mutex_lock(&ippdrv->cmd_lock);
1749 list_for_each_entry_safe(c_node, tc_node,
1750 &ippdrv->cmd_list, list) {
1751 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
1752 count++, (int)ippdrv);
1753
1754 if (c_node->dev == file_priv->ipp_dev) {
1755 /*
1756 * Userland went into an abnormal state: the process
1757 * was killed and the file closed, so the stop cmd
1758 * ctrl was never called. Perform the stop operation
1759 * here in that case.
1760 */
1761 if (c_node->state == IPP_STATE_START) {
1762 ipp_stop_property(drm_dev, ippdrv,
1763 c_node);
1764 c_node->state = IPP_STATE_STOP;
1765 }
1766
1767 ippdrv->dedicated = false;
1768 ipp_clean_cmd_node(ctx, c_node);
1769 if (list_empty(&ippdrv->cmd_list))
1770 pm_runtime_put_sync(ippdrv->dev);
1771 }
1772 }
1773 mutex_unlock(&ippdrv->cmd_lock);
1774 }
1775
1776 return;
1777 }
1778
1779 static int ipp_probe(struct platform_device *pdev)
1780 {
1781 struct device *dev = &pdev->dev;
1782 struct ipp_context *ctx;
1783 struct exynos_drm_subdrv *subdrv;
1784 int ret;
1785
1786 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1787 if (!ctx)
1788 return -ENOMEM;
1789
1790 mutex_init(&ctx->ipp_lock);
1791 mutex_init(&ctx->prop_lock);
1792
1793 idr_init(&ctx->ipp_idr);
1794 idr_init(&ctx->prop_idr);
1795
1796 /*
1797 * Create a single-threaded workqueue for ipp events.
1798 * IPP drivers send event_work to this thread, and the
1799 * event thread delivers the resulting events to the
1800 * user process.
1801 */
1802 ctx->event_workq = create_singlethread_workqueue("ipp_event");
1803 if (!ctx->event_workq) {
1804 dev_err(dev, "failed to create event workqueue\n");
1805 return -EINVAL;
1806 }
1807
1808 /*
1809 * Create a single-threaded workqueue for ipp commands.
1810 * IPP provides a command thread for the user process:
1811 * the process creates a command node with the set property
1812 * ioctl, builds start_work and sends it to this thread,
1813 * which then starts the property.
1814 */
1815 ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1816 if (!ctx->cmd_workq) {
1817 dev_err(dev, "failed to create cmd workqueue\n");
1818 ret = -EINVAL;
1819 goto err_event_workq;
1820 }
1821
1822 /* set sub driver information */
1823 subdrv = &ctx->subdrv;
1824 subdrv->dev = dev;
1825 subdrv->probe = ipp_subdrv_probe;
1826 subdrv->remove = ipp_subdrv_remove;
1827 subdrv->open = ipp_subdrv_open;
1828 subdrv->close = ipp_subdrv_close;
1829
1830 platform_set_drvdata(pdev, ctx);
1831
1832 ret = exynos_drm_subdrv_register(subdrv);
1833 if (ret < 0) {
1834 DRM_ERROR("failed to register drm ipp device.\n");
1835 goto err_cmd_workq;
1836 }
1837
1838 dev_info(dev, "drm ipp registered successfully.\n");
1839
1840 return 0;
1841
1842 err_cmd_workq:
1843 destroy_workqueue(ctx->cmd_workq);
1844 err_event_workq:
1845 destroy_workqueue(ctx->event_workq);
1846 return ret;
1847 }
1848
1849 static int ipp_remove(struct platform_device *pdev)
1850 {
1851 struct ipp_context *ctx = platform_get_drvdata(pdev);
1852
1853 /* unregister sub driver */
1854 exynos_drm_subdrv_unregister(&ctx->subdrv);
1855
1856 /* remove and destroy ipp idrs */
1857 idr_destroy(&ctx->ipp_idr);
1858 idr_destroy(&ctx->prop_idr);
1859
1860 mutex_destroy(&ctx->ipp_lock);
1861 mutex_destroy(&ctx->prop_lock);
1862
1863 /* destroy command, event work queue */
1864 destroy_workqueue(ctx->cmd_workq);
1865 destroy_workqueue(ctx->event_workq);
1866
1867 return 0;
1868 }
1869
1870 static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
1871 {
1872 DRM_DEBUG_KMS("enable[%d]\n", enable);
1873
1874 return 0;
1875 }
1876
1877 #ifdef CONFIG_PM_SLEEP
1878 static int ipp_suspend(struct device *dev)
1879 {
1880 struct ipp_context *ctx = get_ipp_context(dev);
1881
1882 if (pm_runtime_suspended(dev))
1883 return 0;
1884
1885 return ipp_power_ctrl(ctx, false);
1886 }
1887
1888 static int ipp_resume(struct device *dev)
1889 {
1890 struct ipp_context *ctx = get_ipp_context(dev);
1891
1892 if (!pm_runtime_suspended(dev))
1893 return ipp_power_ctrl(ctx, true);
1894
1895 return 0;
1896 }
1897 #endif
1898
1899 #ifdef CONFIG_PM_RUNTIME
1900 static int ipp_runtime_suspend(struct device *dev)
1901 {
1902 struct ipp_context *ctx = get_ipp_context(dev);
1903
1904 return ipp_power_ctrl(ctx, false);
1905 }
1906
1907 static int ipp_runtime_resume(struct device *dev)
1908 {
1909 struct ipp_context *ctx = get_ipp_context(dev);
1910
1911 return ipp_power_ctrl(ctx, true);
1912 }
1913 #endif
1914
1915 static const struct dev_pm_ops ipp_pm_ops = {
1916 SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
1917 SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
1918 };
1919
1920 struct platform_driver ipp_driver = {
1921 .probe = ipp_probe,
1922 .remove = ipp_remove,
1923 .driver = {
1924 .name = "exynos-drm-ipp",
1925 .owner = THIS_MODULE,
1926 .pm = &ipp_pm_ops,
1927 },
1928 };
1929