/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *      Eunchul Kim <chulspro.kim@samsung.com>
 *      Jinyoung Jeon <jy0.jeon@samsung.com>
 *      Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"
/*
 * IPP stands for Image Post Processing. It supports image scaler/rotator
 * and input/output DMA operations using hardware blocks such as FIMC,
 * GSC and the Rotator. IPP is an integration driver for hardware blocks
 * with the same attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open against multi-open.
 * 7. implement power and sysmmu control in power_on.
 */
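/*
 * For orientation, a userspace client drives the ioctls below in roughly
 * this order. This is an illustrative sketch only: fd is an open DRM file
 * descriptor, error handling is omitted, and the property/buffer field
 * values are placeholders.
 *
 *      struct drm_exynos_ipp_property prop = { ... };
 *      struct drm_exynos_ipp_queue_buf qbuf = { ... };
 *      struct drm_exynos_ipp_cmd_ctrl ctrl = { ... };
 *
 *      ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *
 *      qbuf.prop_id = prop.prop_id;
 *      qbuf.buf_type = IPP_BUF_ENQUEUE;
 *      ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);  (src and dst)
 *
 *      ctrl.prop_id = prop.prop_id;
 *      ctrl.ctrl = IPP_CTRL_PLAY;
 *      ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */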
#define get_ipp_context(dev)    platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)       ((c) == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;
/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
        struct drm_pending_event        base;
        struct drm_exynos_ipp_event     event;
};
/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
        struct list_head        list;
        enum drm_exynos_ops_id  ops_id;
        u32     prop_id;
        u32     buf_id;
        struct drm_exynos_ipp_buf_info  buf_info;
        struct drm_file         *filp;
};
/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
        struct exynos_drm_subdrv        subdrv;
        struct mutex    ipp_lock;
        struct mutex    prop_lock;
        struct idr      ipp_idr;
        struct idr      prop_idr;
        struct workqueue_struct *event_workq;
        struct workqueue_struct *cmd_workq;
};
static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
int exynos_platform_device_ipp_register(void)
{
        struct platform_device *pdev;

        if (exynos_drm_ipp_pdev)
                return -EEXIST;

        pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);

        exynos_drm_ipp_pdev = pdev;

        return 0;
}
void exynos_platform_device_ipp_unregister(void)
{
        if (exynos_drm_ipp_pdev) {
                platform_device_unregister(exynos_drm_ipp_pdev);
                exynos_drm_ipp_pdev = NULL;
        }
}
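/*
 * The register/unregister pair above is expected to be driven from the
 * top-level exynos_drm module init/exit path, so the IPP core has a
 * platform device to bind to even on boards that do not declare one.
 */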
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
        DRM_DEBUG_KMS("%s\n", __func__);

        if (!ippdrv)
                return -EINVAL;

        mutex_lock(&exynos_drm_ippdrv_lock);
        list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
        mutex_unlock(&exynos_drm_ippdrv_lock);

        return 0;
}
int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
        DRM_DEBUG_KMS("%s\n", __func__);

        if (!ippdrv)
                return -EINVAL;

        mutex_lock(&exynos_drm_ippdrv_lock);
        list_del(&ippdrv->drv_list);
        mutex_unlock(&exynos_drm_ippdrv_lock);

        return 0;
}
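/*
 * A hardware backend (FIMC, GSC, Rotator, ...) plugs into this list by
 * filling an exynos_drm_ippdrv and registering it from its own probe
 * routine. A minimal sketch with made-up callback names:
 *
 *      static struct exynos_drm_ippdrv rot_ippdrv = {
 *              .ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops,
 *              .ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops,
 *              .check_property = rot_check_property,
 *              .start = rot_start,
 *              .stop = rot_stop,
 *      };
 *
 *      rot_ippdrv.dev = &pdev->dev;
 *      exynos_drm_ippdrv_register(&rot_ippdrv);
 */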
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
                u32 *idp)
{
        int ret;

        DRM_DEBUG_KMS("%s\n", __func__);

        /* do the allocation under our mutexlock */
        mutex_lock(lock);
        ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
        mutex_unlock(lock);
        if (ret < 0)
                return ret;

        *idp = ret;
        return 0;
}
static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
        void *obj;

        DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);

        mutex_lock(lock);

        /* find object using handle */
        obj = idr_find(id_idr, id);
        if (!obj) {
                DRM_ERROR("failed to find object.\n");
                mutex_unlock(lock);
                return ERR_PTR(-ENODEV);
        }

        mutex_unlock(lock);

        return obj;
}
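/*
 * ipp_create_id() and ipp_find_obj() implement the two id spaces used in
 * this file: ipp_idr maps an ipp_id to its exynos_drm_ippdrv, and
 * prop_idr maps a prop_id to its command node. idr_alloc() starts at 1,
 * so an id of 0 can safely mean "not assigned yet" in the ioctl paths.
 */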
static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
                enum drm_exynos_ipp_cmd cmd)
{
        /*
         * check the dedicated flag and, for WB/OUTPUT operations,
         * the power-on state as well.
         */
        if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
            !pm_runtime_suspended(ippdrv->dev)))
                return true;

        return false;
}
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
                struct drm_exynos_ipp_property *property)
{
        struct exynos_drm_ippdrv *ippdrv;
        u32 ipp_id = property->ipp_id;

        DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);

        if (ipp_id) {
                /* find ipp driver using idr */
                ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
                        ipp_id);
                if (IS_ERR(ippdrv)) {
                        DRM_ERROR("not found ipp%d driver.\n", ipp_id);
                        return ippdrv;
                }

                /*
                 * WB and OUTPUT operations do not support multi-operation,
                 * so mark the driver dedicated in the set property ioctl.
                 * When the ipp driver finishes its operations, the
                 * dedicated flag is cleared again.
                 */
                if (ipp_check_dedicated(ippdrv, property->cmd)) {
                        DRM_ERROR("already dedicated to another client.\n");
                        return ERR_PTR(-EBUSY);
                }

                /*
                 * This is necessary to find the correct device among the
                 * ipp drivers: they have different abilities, so each one
                 * has to check the requested property.
                 */
                if (ippdrv->check_property &&
                    ippdrv->check_property(ippdrv->dev, property)) {
                        DRM_ERROR("not support property.\n");
                        return ERR_PTR(-EINVAL);
                }

                return ippdrv;
        } else {
                /*
                 * The user application did not set an ipp_id in this case,
                 * so search the whole driver list for a driver that can
                 * handle the property.
                 */
                list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
                        if (ipp_check_dedicated(ippdrv, property->cmd)) {
                                DRM_DEBUG_KMS("%s:used device.\n", __func__);
                                continue;
                        }

                        if (ippdrv->check_property &&
                            ippdrv->check_property(ippdrv->dev, property)) {
                                DRM_DEBUG_KMS("%s:not support property.\n",
                                        __func__);
                                continue;
                        }

                        return ippdrv;
                }

                DRM_ERROR("not support ipp driver operations.\n");
        }

        return ERR_PTR(-ENODEV);
}
static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
        struct exynos_drm_ippdrv *ippdrv;
        struct drm_exynos_ipp_cmd_node *c_node;
        int count = 0;

        DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

        if (list_empty(&exynos_drm_ippdrv_list)) {
                DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
                return ERR_PTR(-ENODEV);
        }

        /*
         * Search the ipp driver list by prop_id handle.
         * Several paths need this lookup, e.g. the PAUSE state,
         * queue buf and command control.
         */
        list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
                DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
                        count++, (int)ippdrv);

                if (!list_empty(&ippdrv->cmd_list)) {
                        list_for_each_entry(c_node, &ippdrv->cmd_list, list)
                                if (c_node->property.prop_id == prop_id)
                                        return ippdrv;
                }
        }

        return ERR_PTR(-ENODEV);
}
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
                struct drm_file *file)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
        struct device *dev = priv->dev;
        struct ipp_context *ctx = get_ipp_context(dev);
        struct drm_exynos_ipp_prop_list *prop_list = data;
        struct exynos_drm_ippdrv *ippdrv;
        int count = 0;

        DRM_DEBUG_KMS("%s\n", __func__);

        if (!ctx) {
                DRM_ERROR("invalid context.\n");
                return -EINVAL;
        }

        if (!prop_list) {
                DRM_ERROR("invalid property parameter.\n");
                return -EINVAL;
        }

        DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);

        if (!prop_list->ipp_id) {
                list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
                        count++;
                /*
                 * Supports the ippdrv list count for the user application.
                 * First step: the user application reads the ippdrv count.
                 * Second step: it reads each driver's capability by ipp_id.
                 */
                prop_list->count = count;
        } else {
                /*
                 * Get the ippdrv capability by ipp_id.
                 * Some devices do not support the wb or output interface,
                 * so the user application detects the correct ipp driver
                 * using ipp_id and the reported properties.
                 */
                ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
                                                prop_list->ipp_id);
                if (IS_ERR(ippdrv)) {
                        DRM_ERROR("not found ipp%d driver.\n",
                                        prop_list->ipp_id);
                        return PTR_ERR(ippdrv);
                }

                /* copy the driver's capability block to the ioctl argument */
                *prop_list = *ippdrv->prop_list;
        }

        return 0;
}
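/*
 * From userspace the two-step query described above looks roughly like
 * this (illustrative sketch; fd is an open DRM fd, error handling
 * omitted):
 *
 *      struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
 *      __u32 id, n;
 *
 *      ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *      n = plist.count;
 *      for (id = 1; id <= n; id++) {
 *              plist.ipp_id = id;
 *              ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *              (inspect the returned per-driver capability fields)
 *      }
 */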
static void ipp_print_property(struct drm_exynos_ipp_property *property,
                int idx)
{
        struct drm_exynos_ipp_config *config = &property->config[idx];
        struct drm_exynos_pos *pos = &config->pos;
        struct drm_exynos_sz *sz = &config->sz;

        DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
                __func__, property->prop_id, idx ? "dst" : "src", config->fmt);

        DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
                __func__, pos->x, pos->y, pos->w, pos->h,
                sz->hsize, sz->vsize, config->flip, config->degree);
}
static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
        struct exynos_drm_ippdrv *ippdrv;
        struct drm_exynos_ipp_cmd_node *c_node;
        u32 prop_id = property->prop_id;

        DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

        ippdrv = ipp_find_drv_by_handle(prop_id);
        if (IS_ERR(ippdrv)) {
                DRM_ERROR("failed to get ipp driver.\n");
                return -EINVAL;
        }

        /*
         * Find the command node in the ippdrv command list using prop_id,
         * and store the new property information in that node.
         */
        list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
                if ((c_node->property.prop_id == prop_id) &&
                    (c_node->state == IPP_STATE_STOP)) {
                        DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
                                __func__, property->cmd, (int)ippdrv);

                        c_node->property = *property;
                        return 0;
                }
        }

        DRM_ERROR("failed to search property.\n");

        return -EINVAL;
}
static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
        struct drm_exynos_ipp_cmd_work *cmd_work;

        DRM_DEBUG_KMS("%s\n", __func__);

        cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
        if (!cmd_work) {
                DRM_ERROR("failed to alloc cmd_work.\n");
                return ERR_PTR(-ENOMEM);
        }

        INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

        return cmd_work;
}
static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
        struct drm_exynos_ipp_event_work *event_work;

        DRM_DEBUG_KMS("%s\n", __func__);

        event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
        if (!event_work) {
                DRM_ERROR("failed to alloc event_work.\n");
                return ERR_PTR(-ENOMEM);
        }

        INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

        return event_work;
}
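/*
 * Both helpers above cast their container straight to work_struct for
 * INIT_WORK(). This relies on the work_struct being the first member of
 * drm_exynos_ipp_cmd_work/drm_exynos_ipp_event_work; ipp_sched_cmd() and
 * ipp_sched_event() undo the cast the same way when the work runs.
 */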
int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
                struct drm_file *file)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
        struct device *dev = priv->dev;
        struct ipp_context *ctx = get_ipp_context(dev);
        struct drm_exynos_ipp_property *property = data;
        struct exynos_drm_ippdrv *ippdrv;
        struct drm_exynos_ipp_cmd_node *c_node;
        int ret, i;

        DRM_DEBUG_KMS("%s\n", __func__);

        if (!ctx) {
                DRM_ERROR("invalid context.\n");
                return -EINVAL;
        }

        if (!property) {
                DRM_ERROR("invalid property parameter.\n");
                return -EINVAL;
        }

        /*
         * Log the property handed in by the user application,
         * which may set various properties.
         */
        for_each_ipp_ops(i)
                ipp_print_property(property, i);

        /*
         * The set property ioctl normally generates a new prop_id, but a
         * prop_id may already be assigned by an earlier set property call,
         * e.g. in the PAUSE state. In that case find the current prop_id
         * and use it instead of allocating a new one.
         */
        if (property->prop_id) {
                DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
                return ipp_find_and_set_property(property);
        }

        /* find ipp driver using ipp id */
        ippdrv = ipp_find_driver(ctx, property);
        if (IS_ERR(ippdrv)) {
                DRM_ERROR("failed to get ipp driver.\n");
                return -EINVAL;
        }

        /* allocate command node */
        c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
        if (!c_node) {
                DRM_ERROR("failed to allocate map node.\n");
                return -ENOMEM;
        }

        /* create property id */
        ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
                &property->prop_id);
        if (ret) {
                DRM_ERROR("failed to create id.\n");
                goto err_clear;
        }

        DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
                __func__, property->prop_id, property->cmd, (int)ippdrv);

        /* store property information and ippdrv in private data */
        c_node->priv = priv;
        c_node->property = *property;
        c_node->state = IPP_STATE_IDLE;

        c_node->start_work = ipp_create_cmd_work();
        if (IS_ERR(c_node->start_work)) {
                DRM_ERROR("failed to create start work.\n");
                ret = PTR_ERR(c_node->start_work);
                goto err_clear;
        }

        c_node->stop_work = ipp_create_cmd_work();
        if (IS_ERR(c_node->stop_work)) {
                DRM_ERROR("failed to create stop work.\n");
                ret = PTR_ERR(c_node->stop_work);
                goto err_free_start;
        }

        c_node->event_work = ipp_create_event_work();
        if (IS_ERR(c_node->event_work)) {
                DRM_ERROR("failed to create event work.\n");
                ret = PTR_ERR(c_node->event_work);
                goto err_free_stop;
        }

        mutex_init(&c_node->cmd_lock);
        mutex_init(&c_node->mem_lock);
        mutex_init(&c_node->event_lock);

        init_completion(&c_node->start_complete);
        init_completion(&c_node->stop_complete);

        for_each_ipp_ops(i)
                INIT_LIST_HEAD(&c_node->mem_list[i]);

        INIT_LIST_HEAD(&c_node->event_list);
        list_splice_init(&priv->event_list, &c_node->event_list);
        list_add_tail(&c_node->list, &ippdrv->cmd_list);

        /* make dedicated state without m2m */
        if (!ipp_is_m2m_cmd(property->cmd))
                ippdrv->dedicated = true;

        return 0;

err_free_stop:
        kfree(c_node->stop_work);
err_free_start:
        kfree(c_node->start_work);
err_clear:
        kfree(c_node);
        return ret;
}
static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
        DRM_DEBUG_KMS("%s\n", __func__);

        /* delete list */
        list_del(&c_node->list);

        /* destroy mutex */
        mutex_destroy(&c_node->cmd_lock);
        mutex_destroy(&c_node->mem_lock);
        mutex_destroy(&c_node->event_lock);

        /* free command node */
        kfree(c_node->start_work);
        kfree(c_node->stop_work);
        kfree(c_node->event_work);
        kfree(c_node);
}
static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
        struct drm_exynos_ipp_property *property = &c_node->property;
        struct drm_exynos_ipp_mem_node *m_node;
        struct list_head *head;
        int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

        DRM_DEBUG_KMS("%s\n", __func__);

        mutex_lock(&c_node->mem_lock);

        for_each_ipp_ops(i) {
                /* source/destination memory list */
                head = &c_node->mem_list[i];

                if (list_empty(head)) {
                        DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
                                i ? "dst" : "src");
                        continue;
                }

                /* find memory node entry */
                list_for_each_entry(m_node, head, list) {
                        DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
                                i ? "dst" : "src", count[i], (int)m_node);
                        count[i]++;
                }
        }

        DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
                min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
                max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

        /*
         * M2M operations need paired memory addresses, so check the
         * minimum count of src and dst buffers. The other commands do
         * not use paired memory, so use the maximum count there.
         */
        if (ipp_is_m2m_cmd(property->cmd))
                ret = min(count[EXYNOS_DRM_OPS_SRC],
                        count[EXYNOS_DRM_OPS_DST]);
        else
                ret = max(count[EXYNOS_DRM_OPS_SRC],
                        count[EXYNOS_DRM_OPS_DST]);

        mutex_unlock(&c_node->mem_lock);

        return ret;
}
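/*
 * Example: with 3 src and 2 dst buffers queued, an M2M command can run
 * min(3, 2) = 2 times, while WB/OUTPUT commands use only one side and
 * simply report however many buffers that side has queued.
 */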
static struct drm_exynos_ipp_mem_node
        *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
        struct drm_exynos_ipp_queue_buf *qbuf)
{
        struct drm_exynos_ipp_mem_node *m_node;
        struct list_head *head;
        int count = 0;

        DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);

        /* source/destination memory list */
        head = &c_node->mem_list[qbuf->ops_id];

        /* find memory node from memory list */
        list_for_each_entry(m_node, head, list) {
                DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
                        __func__, count++, (int)m_node);

                /* compare buffer id */
                if (m_node->buf_id == qbuf->buf_id)
                        return m_node;
        }

        return NULL;
}
static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
                struct drm_exynos_ipp_cmd_node *c_node,
                struct drm_exynos_ipp_mem_node *m_node)
{
        struct exynos_drm_ipp_ops *ops = NULL;
        int ret = 0;

        DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

        if (!m_node) {
                DRM_ERROR("invalid queue node.\n");
                return -EFAULT;
        }

        mutex_lock(&c_node->mem_lock);

        DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

        /* get operations callback */
        ops = ippdrv->ops[m_node->ops_id];
        if (!ops) {
                DRM_ERROR("not support ops.\n");
                ret = -EFAULT;
                goto err_unlock;
        }

        /* set address and enable irq */
        if (ops->set_addr) {
                ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
                        m_node->buf_id, IPP_BUF_ENQUEUE);
                if (ret) {
                        DRM_ERROR("failed to set addr.\n");
                        goto err_unlock;
                }
        }

err_unlock:
        mutex_unlock(&c_node->mem_lock);
        return ret;
}
static struct drm_exynos_ipp_mem_node
        *ipp_get_mem_node(struct drm_device *drm_dev,
        struct drm_file *file,
        struct drm_exynos_ipp_cmd_node *c_node,
        struct drm_exynos_ipp_queue_buf *qbuf)
{
        struct drm_exynos_ipp_mem_node *m_node;
        struct drm_exynos_ipp_buf_info buf_info;
        void *addr;
        int i;

        DRM_DEBUG_KMS("%s\n", __func__);

        mutex_lock(&c_node->mem_lock);

        m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
        if (!m_node) {
                DRM_ERROR("failed to allocate queue node.\n");
                goto err_unlock;
        }

        /* clear base address for error handling */
        memset(&buf_info, 0x0, sizeof(buf_info));

        /* operations, buffer id */
        m_node->ops_id = qbuf->ops_id;
        m_node->prop_id = qbuf->prop_id;
        m_node->buf_id = qbuf->buf_id;

        DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
                (int)m_node, qbuf->ops_id);
        DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
                qbuf->prop_id, m_node->buf_id);

        for_each_ipp_planar(i) {
                DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
                        i, qbuf->handle[i]);

                /* get dma address by handle */
                if (qbuf->handle[i]) {
                        addr = exynos_drm_gem_get_dma_addr(drm_dev,
                                        qbuf->handle[i], file);
                        if (IS_ERR(addr)) {
                                DRM_ERROR("failed to get addr.\n");
                                goto err_clear;
                        }

                        buf_info.handles[i] = qbuf->handle[i];
                        buf_info.base[i] = *(dma_addr_t *) addr;
                        DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
                                __func__, i, buf_info.base[i],
                                (int)buf_info.handles[i]);
                }
        }

        m_node->filp = file;
        m_node->buf_info = buf_info;
        list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

        mutex_unlock(&c_node->mem_lock);
        return m_node;

err_clear:
        kfree(m_node);
err_unlock:
        mutex_unlock(&c_node->mem_lock);
        return ERR_PTR(-EFAULT);
}
static int ipp_put_mem_node(struct drm_device *drm_dev,
                struct drm_exynos_ipp_cmd_node *c_node,
                struct drm_exynos_ipp_mem_node *m_node)
{
        int i;

        DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);

        if (!m_node) {
                DRM_ERROR("invalid dequeue node.\n");
                return -EFAULT;
        }

        if (list_empty(&m_node->list)) {
                DRM_ERROR("empty memory node.\n");
                return -ENOMEM;
        }

        mutex_lock(&c_node->mem_lock);

        DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);

        /* put gem buffer */
        for_each_ipp_planar(i) {
                unsigned long handle = m_node->buf_info.handles[i];
                if (handle)
                        exynos_drm_gem_put_dma_addr(drm_dev, handle,
                                                        m_node->filp);
        }

        /* delete list in queue */
        list_del(&m_node->list);
        kfree(m_node);

        mutex_unlock(&c_node->mem_lock);

        return 0;
}
static void ipp_free_event(struct drm_pending_event *event)
{
        kfree(event);
}
static int ipp_get_event(struct drm_device *drm_dev,
                struct drm_file *file,
                struct drm_exynos_ipp_cmd_node *c_node,
                struct drm_exynos_ipp_queue_buf *qbuf)
{
        struct drm_exynos_ipp_send_event *e;
        unsigned long flags;

        DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
                qbuf->ops_id, qbuf->buf_id);

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e) {
                DRM_ERROR("failed to allocate event.\n");
                spin_lock_irqsave(&drm_dev->event_lock, flags);
                file->event_space += sizeof(e->event);
                spin_unlock_irqrestore(&drm_dev->event_lock, flags);
                return -ENOMEM;
        }

        /* make event */
        e->event.base.type = DRM_EXYNOS_IPP_EVENT;
        e->event.base.length = sizeof(e->event);
        e->event.user_data = qbuf->user_data;
        e->event.prop_id = qbuf->prop_id;
        e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
        e->base.event = &e->event.base;
        e->base.file_priv = file;
        e->base.destroy = ipp_free_event;
        list_add_tail(&e->base.link, &c_node->event_list);

        return 0;
}
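/*
 * The event queued above reaches userspace through the ordinary DRM
 * event stream, i.e. a read() on the DRM fd. A rough consumer sketch
 * (illustrative only; single event, no partial-read handling):
 *
 *      char buf[128];
 *      ssize_t len = read(fd, buf, sizeof(buf));
 *      struct drm_event *ev = (struct drm_event *)buf;
 *
 *      if (len > 0 && ev->type == DRM_EXYNOS_IPP_EVENT) {
 *              struct drm_exynos_ipp_event *ipp_ev = (void *)ev;
 *              (ipp_ev->prop_id and ipp_ev->buf_id[EXYNOS_DRM_OPS_DST]
 *               identify the finished buffer)
 *      }
 */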
static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
                struct drm_exynos_ipp_queue_buf *qbuf)
{
        struct drm_exynos_ipp_send_event *e, *te;
        int count = 0;

        DRM_DEBUG_KMS("%s\n", __func__);

        if (list_empty(&c_node->event_list)) {
                DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
                return;
        }

        list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
                DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
                        __func__, count++, (int)e);

                /*
                 * qbuf == NULL means delete all events: the stop
                 * operation wants to flush the whole event list.
                 * Otherwise delete only the event with the same buf id.
                 */
                if (!qbuf) {
                        /* delete list */
                        list_del(&e->base.link);
                        kfree(e);
                }

                /* compare buffer id */
                if (qbuf && (qbuf->buf_id ==
                    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
                        /* delete list */
                        list_del(&e->base.link);
                        kfree(e);
                        return;
                }
        }
}
static void ipp_handle_cmd_work(struct device *dev,
                struct exynos_drm_ippdrv *ippdrv,
                struct drm_exynos_ipp_cmd_work *cmd_work,
                struct drm_exynos_ipp_cmd_node *c_node)
{
        struct ipp_context *ctx = get_ipp_context(dev);

        cmd_work->ippdrv = ippdrv;
        cmd_work->c_node = c_node;
        queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}
static int ipp_queue_buf_with_run(struct device *dev,
                struct drm_exynos_ipp_cmd_node *c_node,
                struct drm_exynos_ipp_mem_node *m_node,
                struct drm_exynos_ipp_queue_buf *qbuf)
{
        struct exynos_drm_ippdrv *ippdrv;
        struct drm_exynos_ipp_property *property;
        struct exynos_drm_ipp_ops *ops;
        int ret;

        DRM_DEBUG_KMS("%s\n", __func__);

        ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
        if (IS_ERR(ippdrv)) {
                DRM_ERROR("failed to get ipp driver.\n");
                return -EFAULT;
        }

        ops = ippdrv->ops[qbuf->ops_id];
        if (!ops) {
                DRM_ERROR("failed to get ops.\n");
                return -EFAULT;
        }

        property = &c_node->property;

        if (c_node->state != IPP_STATE_START) {
                DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
                return 0;
        }

        if (!ipp_check_mem_list(c_node)) {
                DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
                return 0;
        }

        /*
         * If the destination buffer is set and the clock is enabled,
         * m2m operations are started right here at queue_buf time.
         */
        if (ipp_is_m2m_cmd(property->cmd)) {
                struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

                cmd_work->ctrl = IPP_CTRL_PLAY;
                ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
        } else {
                ret = ipp_set_mem_node(ippdrv, c_node, m_node);
                if (ret) {
                        DRM_ERROR("failed to set m node.\n");
                        return ret;
                }
        }

        return 0;
}
static void ipp_clean_queue_buf(struct drm_device *drm_dev,
                struct drm_exynos_ipp_cmd_node *c_node,
                struct drm_exynos_ipp_queue_buf *qbuf)
{
        struct drm_exynos_ipp_mem_node *m_node, *tm_node;

        DRM_DEBUG_KMS("%s\n", __func__);

        if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
                /* delete list */
                list_for_each_entry_safe(m_node, tm_node,
                        &c_node->mem_list[qbuf->ops_id], list) {
                        if (m_node->buf_id == qbuf->buf_id &&
                            m_node->ops_id == qbuf->ops_id)
                                ipp_put_mem_node(drm_dev, c_node, m_node);
                }
        }
}
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
                struct drm_file *file)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
        struct device *dev = priv->dev;
        struct ipp_context *ctx = get_ipp_context(dev);
        struct drm_exynos_ipp_queue_buf *qbuf = data;
        struct drm_exynos_ipp_cmd_node *c_node;
        struct drm_exynos_ipp_mem_node *m_node;
        int ret;

        DRM_DEBUG_KMS("%s\n", __func__);

        if (!qbuf) {
                DRM_ERROR("invalid buf parameter.\n");
                return -EINVAL;
        }

        if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
                DRM_ERROR("invalid ops parameter.\n");
                return -EINVAL;
        }

        DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
                __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
                qbuf->buf_id, qbuf->buf_type);

        /* find command node */
        c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
                qbuf->prop_id);
        if (IS_ERR(c_node)) {
                DRM_ERROR("failed to get command node.\n");
                return PTR_ERR(c_node);
        }

        /* buffer control */
        switch (qbuf->buf_type) {
        case IPP_BUF_ENQUEUE:
                /* get memory node */
                m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
                if (IS_ERR(m_node)) {
                        DRM_ERROR("failed to get m_node.\n");
                        return PTR_ERR(m_node);
                }

                /*
                 * The first step is to get an event for the destination
                 * buffer. The second step, in the M2M case, is to run with
                 * that destination buffer if needed.
                 */
                if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
                        /* get event for destination buffer */
                        ret = ipp_get_event(drm_dev, file, c_node, qbuf);
                        if (ret) {
                                DRM_ERROR("failed to get event.\n");
                                goto err_clean_node;
                        }

                        /*
                         * The M2M case runs the play control here for
                         * streaming; the other cases only set the address
                         * and wait.
                         */
                        ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
                        if (ret) {
                                DRM_ERROR("failed to run command.\n");
                                goto err_clean_node;
                        }
                }
                break;
        case IPP_BUF_DEQUEUE:
                mutex_lock(&c_node->cmd_lock);

                /* put event for destination buffer */
                if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
                        ipp_put_event(c_node, qbuf);

                ipp_clean_queue_buf(drm_dev, c_node, qbuf);

                mutex_unlock(&c_node->cmd_lock);
                break;
        default:
                DRM_ERROR("invalid buffer control.\n");
                return -EINVAL;
        }

        return 0;

err_clean_node:
        DRM_ERROR("clean memory nodes.\n");

        ipp_clean_queue_buf(drm_dev, c_node, qbuf);
        return ret;
}
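/*
 * Buffer lifecycle summary for the ioctl above: IPP_BUF_ENQUEUE creates
 * the memory node (and, on the destination side, the completion event)
 * and may kick an M2M run; IPP_BUF_DEQUEUE is the inverse and is issued
 * by userspace once the completion event for that buf_id was consumed.
 */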
static bool exynos_drm_ipp_check_valid(struct device *dev,
                enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
        DRM_DEBUG_KMS("%s\n", __func__);

        if (ctrl != IPP_CTRL_PLAY) {
                if (pm_runtime_suspended(dev)) {
                        DRM_ERROR("pm:runtime_suspended.\n");
                        goto err_status;
                }
        }

        switch (ctrl) {
        case IPP_CTRL_PLAY:
                if (state != IPP_STATE_IDLE)
                        goto err_status;
                break;
        case IPP_CTRL_STOP:
                if (state == IPP_STATE_STOP)
                        goto err_status;
                break;
        case IPP_CTRL_PAUSE:
                if (state != IPP_STATE_START)
                        goto err_status;
                break;
        case IPP_CTRL_RESUME:
                if (state != IPP_STATE_STOP)
                        goto err_status;
                break;
        default:
                DRM_ERROR("invalid state.\n");
                goto err_status;
        }

        return true;

err_status:
        DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
        return false;
}
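/*
 * Control/state combinations accepted above (the resulting state is set
 * later in exynos_drm_ipp_cmd_ctrl):
 *
 *      ctrl            valid current state     resulting state
 *      PLAY            IDLE                    START
 *      STOP            anything but STOP       STOP
 *      PAUSE           START                   STOP
 *      RESUME          STOP                    START
 *
 * In addition, everything except PLAY requires the device not to be
 * runtime-suspended.
 */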
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
                struct drm_file *file)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
        struct exynos_drm_ippdrv *ippdrv = NULL;
        struct device *dev = priv->dev;
        struct ipp_context *ctx = get_ipp_context(dev);
        struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
        struct drm_exynos_ipp_cmd_work *cmd_work;
        struct drm_exynos_ipp_cmd_node *c_node;

        DRM_DEBUG_KMS("%s\n", __func__);

        if (!ctx) {
                DRM_ERROR("invalid context.\n");
                return -EINVAL;
        }

        if (!cmd_ctrl) {
                DRM_ERROR("invalid control parameter.\n");
                return -EINVAL;
        }

        DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
                cmd_ctrl->ctrl, cmd_ctrl->prop_id);

        ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
        if (IS_ERR(ippdrv)) {
                DRM_ERROR("failed to get ipp driver.\n");
                return PTR_ERR(ippdrv);
        }

        c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
                cmd_ctrl->prop_id);
        if (IS_ERR(c_node)) {
                DRM_ERROR("invalid command node list.\n");
                return PTR_ERR(c_node);
        }

        if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
            c_node->state)) {
                DRM_ERROR("invalid state.\n");
                return -EINVAL;
        }

        switch (cmd_ctrl->ctrl) {
        case IPP_CTRL_PLAY:
                if (pm_runtime_suspended(ippdrv->dev))
                        pm_runtime_get_sync(ippdrv->dev);
                c_node->state = IPP_STATE_START;

                cmd_work = c_node->start_work;
                cmd_work->ctrl = cmd_ctrl->ctrl;
                ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
                break;
        case IPP_CTRL_STOP:
                cmd_work = c_node->stop_work;
                cmd_work->ctrl = cmd_ctrl->ctrl;
                ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

                if (!wait_for_completion_timeout(&c_node->stop_complete,
                    msecs_to_jiffies(300))) {
                        DRM_ERROR("timeout stop:prop_id[%d]\n",
                                c_node->property.prop_id);
                }

                c_node->state = IPP_STATE_STOP;
                ippdrv->dedicated = false;
                ipp_clean_cmd_node(c_node);

                if (list_empty(&ippdrv->cmd_list))
                        pm_runtime_put_sync(ippdrv->dev);
                break;
        case IPP_CTRL_PAUSE:
                cmd_work = c_node->stop_work;
                cmd_work->ctrl = cmd_ctrl->ctrl;
                ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

                if (!wait_for_completion_timeout(&c_node->stop_complete,
                    msecs_to_jiffies(200))) {
                        DRM_ERROR("timeout stop:prop_id[%d]\n",
                                c_node->property.prop_id);
                }

                c_node->state = IPP_STATE_STOP;
                break;
        case IPP_CTRL_RESUME:
                c_node->state = IPP_STATE_START;
                cmd_work = c_node->start_work;
                cmd_work->ctrl = cmd_ctrl->ctrl;
                ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
                break;
        default:
                DRM_ERROR("could not support this state currently.\n");
                return -EINVAL;
        }

        DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
                cmd_ctrl->ctrl, cmd_ctrl->prop_id);

        return 0;
}
int exynos_drm_ippnb_register(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(
                &exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(
                &exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
        return blocking_notifier_call_chain(
                &exynos_drm_ippnb_list, val, v);
}
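/*
 * A minimal consumer of this notifier chain (hypothetical callback name)
 * would look like:
 *
 *      static int my_ippnb_cb(struct notifier_block *nb,
 *                      unsigned long val, void *data)
 *      {
 *              (react to the IPP event encoded in val/data)
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_ippnb = {
 *              .notifier_call = my_ippnb_cb,
 *      };
 *
 *      exynos_drm_ippnb_register(&my_ippnb);
 */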
static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
                struct drm_exynos_ipp_property *property)
{
        struct exynos_drm_ipp_ops *ops = NULL;
        bool swap = false;
        int ret, i;

        if (!property) {
                DRM_ERROR("invalid property parameter.\n");
                return -EINVAL;
        }

        DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

        /* reset h/w block */
        if (ippdrv->reset &&
            ippdrv->reset(ippdrv->dev)) {
                DRM_ERROR("failed to reset.\n");
                return -EINVAL;
        }

        /* set source,destination operations */
        for_each_ipp_ops(i) {
                struct drm_exynos_ipp_config *config =
                        &property->config[i];

                ops = ippdrv->ops[i];
                if (!ops || !config) {
                        DRM_ERROR("not support ops and config.\n");
                        return -EINVAL;
                }

                /* set format */
                if (ops->set_fmt) {
                        ret = ops->set_fmt(ippdrv->dev, config->fmt);
                        if (ret) {
                                DRM_ERROR("not support format.\n");
                                return ret;
                        }
                }

                /* set transform for rotation, flip */
                if (ops->set_transf) {
                        ret = ops->set_transf(ippdrv->dev, config->degree,
                                config->flip, &swap);
                        if (ret) {
                                DRM_ERROR("not support transform.\n");
                                return -EINVAL;
                        }
                }

                /* set size */
                if (ops->set_size) {
                        ret = ops->set_size(ippdrv->dev, swap, &config->pos,
                                &config->sz);
                        if (ret) {
                                DRM_ERROR("not support size.\n");
                                return ret;
                        }
                }
        }

        return 0;
}
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
                struct drm_exynos_ipp_cmd_node *c_node)
{
        struct drm_exynos_ipp_mem_node *m_node;
        struct drm_exynos_ipp_property *property = &c_node->property;
        struct list_head *head;
        int ret, i;

        DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

        /* store command info in ippdrv */
        ippdrv->c_node = c_node;

        if (!ipp_check_mem_list(c_node)) {
                DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
                return -ENOMEM;
        }

        /* set current property in ippdrv */
        ret = ipp_set_property(ippdrv, property);
        if (ret) {
                DRM_ERROR("failed to set property.\n");
                ippdrv->c_node = NULL;
                return ret;
        }

        /* check command */
        switch (property->cmd) {
        case IPP_CMD_M2M:
                for_each_ipp_ops(i) {
                        /* source/destination memory list */
                        head = &c_node->mem_list[i];

                        m_node = list_first_entry(head,
                                struct drm_exynos_ipp_mem_node, list);
                        if (!m_node) {
                                DRM_ERROR("failed to get node.\n");
                                ret = -EFAULT;
                                return ret;
                        }

                        DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
                                __func__, (int)m_node);

                        ret = ipp_set_mem_node(ippdrv, c_node, m_node);
                        if (ret) {
                                DRM_ERROR("failed to set m node.\n");
                                return ret;
                        }
                }
                break;
        case IPP_CMD_WB:
                /* destination memory list */
                head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

                list_for_each_entry(m_node, head, list) {
                        ret = ipp_set_mem_node(ippdrv, c_node, m_node);
                        if (ret) {
                                DRM_ERROR("failed to set m node.\n");
                                return ret;
                        }
                }
                break;
        case IPP_CMD_OUTPUT:
                /* source memory list */
                head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

                list_for_each_entry(m_node, head, list) {
                        ret = ipp_set_mem_node(ippdrv, c_node, m_node);
                        if (ret) {
                                DRM_ERROR("failed to set m node.\n");
                                return ret;
                        }
                }
                break;
        default:
                DRM_ERROR("invalid operations.\n");
                return -EINVAL;
        }

        DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);

        /* start operations */
        if (ippdrv->start) {
                ret = ippdrv->start(ippdrv->dev, property->cmd);
                if (ret) {
                        DRM_ERROR("failed to start ops.\n");
                        return ret;
                }
        }

        return 0;
}
static int ipp_stop_property(struct drm_device *drm_dev,
                struct exynos_drm_ippdrv *ippdrv,
                struct drm_exynos_ipp_cmd_node *c_node)
{
        struct drm_exynos_ipp_mem_node *m_node, *tm_node;
        struct drm_exynos_ipp_property *property = &c_node->property;
        struct list_head *head;
        int ret = 0, i;

        DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

        /* put event */
        ipp_put_event(c_node, NULL);

        /* check command */
        switch (property->cmd) {
        case IPP_CMD_M2M:
                for_each_ipp_ops(i) {
                        /* source/destination memory list */
                        head = &c_node->mem_list[i];

                        if (list_empty(head)) {
                                DRM_DEBUG_KMS("%s:mem_list is empty.\n",
                                        __func__);
                                break;
                        }

                        list_for_each_entry_safe(m_node, tm_node,
                                head, list) {
                                ret = ipp_put_mem_node(drm_dev, c_node,
                                        m_node);
                                if (ret) {
                                        DRM_ERROR("failed to put m_node.\n");
                                        goto err_clear;
                                }
                        }
                }
                break;
        case IPP_CMD_WB:
                /* destination memory list */
                head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

                if (list_empty(head)) {
                        DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
                        break;
                }

                list_for_each_entry_safe(m_node, tm_node, head, list) {
                        ret = ipp_put_mem_node(drm_dev, c_node, m_node);
                        if (ret) {
                                DRM_ERROR("failed to put m_node.\n");
                                goto err_clear;
                        }
                }
                break;
        case IPP_CMD_OUTPUT:
                /* source memory list */
                head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

                if (list_empty(head)) {
                        DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
                        break;
                }

                list_for_each_entry_safe(m_node, tm_node, head, list) {
                        ret = ipp_put_mem_node(drm_dev, c_node, m_node);
                        if (ret) {
                                DRM_ERROR("failed to put m_node.\n");
                                goto err_clear;
                        }
                }
                break;
        default:
                DRM_ERROR("invalid operations.\n");
                ret = -EINVAL;
                goto err_clear;
        }

err_clear:
        /* stop operations */
        if (ippdrv->stop)
                ippdrv->stop(ippdrv->dev, property->cmd);

        return ret;
}
void ipp_sched_cmd(struct work_struct *work)
{
        struct drm_exynos_ipp_cmd_work *cmd_work =
                (struct drm_exynos_ipp_cmd_work *)work;
        struct exynos_drm_ippdrv *ippdrv;
        struct drm_exynos_ipp_cmd_node *c_node;
        struct drm_exynos_ipp_property *property;
        int ret;

        DRM_DEBUG_KMS("%s\n", __func__);

        ippdrv = cmd_work->ippdrv;
        if (!ippdrv) {
                DRM_ERROR("invalid ippdrv list.\n");
                return;
        }

        c_node = cmd_work->c_node;
        if (!c_node) {
                DRM_ERROR("invalid command node list.\n");
                return;
        }

        mutex_lock(&c_node->cmd_lock);

        property = &c_node->property;

        switch (cmd_work->ctrl) {
        case IPP_CTRL_PLAY:
        case IPP_CTRL_RESUME:
                ret = ipp_start_property(ippdrv, c_node);
                if (ret) {
                        DRM_ERROR("failed to start property:prop_id[%d]\n",
                                c_node->property.prop_id);
                        goto err_unlock;
                }

                /*
                 * The M2M case waits for completion of the transfer here,
                 * because M2M is a single-unit operation fed from multiple
                 * queues and must not start the next unit before the data
                 * transfer has finished.
                 */
                if (ipp_is_m2m_cmd(property->cmd)) {
                        if (!wait_for_completion_timeout
                            (&c_node->start_complete, msecs_to_jiffies(200))) {
                                DRM_ERROR("timeout event:prop_id[%d]\n",
                                        c_node->property.prop_id);
                                goto err_unlock;
                        }
                }
                break;
        case IPP_CTRL_STOP:
        case IPP_CTRL_PAUSE:
                ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
                        c_node);
                if (ret) {
                        DRM_ERROR("failed to stop property.\n");
                        goto err_unlock;
                }

                complete(&c_node->stop_complete);
                break;
        default:
                DRM_ERROR("unknown control type\n");
                break;
        }

        DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);

err_unlock:
        mutex_unlock(&c_node->cmd_lock);
}
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
                struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
        struct drm_device *drm_dev = ippdrv->drm_dev;
        struct drm_exynos_ipp_property *property = &c_node->property;
        struct drm_exynos_ipp_mem_node *m_node;
        struct drm_exynos_ipp_queue_buf qbuf;
        struct drm_exynos_ipp_send_event *e;
        struct list_head *head;
        struct timeval now;
        unsigned long flags;
        u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
        int ret, i;

        for_each_ipp_ops(i)
                DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
                        i ? "dst" : "src", buf_id[i]);

        if (!drm_dev) {
                DRM_ERROR("failed to get drm_dev.\n");
                return -EINVAL;
        }

        if (!property) {
                DRM_ERROR("failed to get property.\n");
                return -EINVAL;
        }

        if (list_empty(&c_node->event_list)) {
                DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
                return 0;
        }

        if (!ipp_check_mem_list(c_node)) {
                DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
                return 0;
        }

        /* check command */
        switch (property->cmd) {
        case IPP_CMD_M2M:
                for_each_ipp_ops(i) {
                        /* source/destination memory list */
                        head = &c_node->mem_list[i];

                        m_node = list_first_entry(head,
                                struct drm_exynos_ipp_mem_node, list);
                        if (!m_node) {
                                DRM_ERROR("empty memory node.\n");
                                return -ENOMEM;
                        }

                        tbuf_id[i] = m_node->buf_id;
                        DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
                                i ? "dst" : "src", tbuf_id[i]);

                        ret = ipp_put_mem_node(drm_dev, c_node, m_node);
                        if (ret)
                                DRM_ERROR("failed to put m_node.\n");
                }
                break;
        case IPP_CMD_WB:
                /* clear buf for finding */
                memset(&qbuf, 0x0, sizeof(qbuf));
                qbuf.ops_id = EXYNOS_DRM_OPS_DST;
                qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

                /* get memory node entry */
                m_node = ipp_find_mem_node(c_node, &qbuf);
                if (!m_node) {
                        DRM_ERROR("empty memory node.\n");
                        return -ENOMEM;
                }

                tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

                ret = ipp_put_mem_node(drm_dev, c_node, m_node);
                if (ret)
                        DRM_ERROR("failed to put m_node.\n");
                break;
        case IPP_CMD_OUTPUT:
                /* source memory list */
                head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

                m_node = list_first_entry(head,
                        struct drm_exynos_ipp_mem_node, list);
                if (!m_node) {
                        DRM_ERROR("empty memory node.\n");
                        return -ENOMEM;
                }

                tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

                ret = ipp_put_mem_node(drm_dev, c_node, m_node);
                if (ret)
                        DRM_ERROR("failed to put m_node.\n");
                break;
        default:
                DRM_ERROR("invalid operations.\n");
                return -EINVAL;
        }

        if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
                DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
                        tbuf_id[1], buf_id[1], property->prop_id);

        /*
         * The command node keeps an event list for destination buffers.
         * Whenever a destination buffer is enqueued to the mem list, an
         * event is created and linked to the tail of the event list, so
         * the first event belongs to the first enqueued buffer.
         */
        e = list_first_entry(&c_node->event_list,
                struct drm_exynos_ipp_send_event, base.link);
        if (!e) {
                DRM_ERROR("empty event.\n");
                return -EINVAL;
        }

        do_gettimeofday(&now);
        DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
                , __func__, now.tv_sec, now.tv_usec);
        e->event.tv_sec = now.tv_sec;
        e->event.tv_usec = now.tv_usec;
        e->event.prop_id = property->prop_id;

        /* set buffer id about source destination */
        for_each_ipp_ops(i)
                e->event.buf_id[i] = tbuf_id[i];

        spin_lock_irqsave(&drm_dev->event_lock, flags);
        list_move_tail(&e->base.link, &e->base.file_priv->event_list);
        wake_up_interruptible(&e->base.file_priv->event_wait);
        spin_unlock_irqrestore(&drm_dev->event_lock, flags);

        DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
                property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

        return 0;
}
void ipp_sched_event(struct work_struct *work)
{
        struct drm_exynos_ipp_event_work *event_work =
                (struct drm_exynos_ipp_event_work *)work;
        struct exynos_drm_ippdrv *ippdrv;
        struct drm_exynos_ipp_cmd_node *c_node;
        int ret;

        if (!event_work) {
                DRM_ERROR("failed to get event_work.\n");
                return;
        }

        DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
                event_work->buf_id[EXYNOS_DRM_OPS_DST]);

        ippdrv = event_work->ippdrv;
        if (!ippdrv) {
                DRM_ERROR("failed to get ipp driver.\n");
                return;
        }

        c_node = ippdrv->c_node;
        if (!c_node) {
                DRM_ERROR("failed to get command node.\n");
                return;
        }

        /*
         * The IPP core synchronizes the command and event threads. If
         * userspace closes the device immediately, the command thread may
         * already have torn the state down; in that case bypass event
         * delivery but still signal completion so nobody waits forever.
         */
        if (c_node->state != IPP_STATE_START) {
                DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
                        __func__, c_node->state, c_node->property.prop_id);
                goto err_completion;
        }

        mutex_lock(&c_node->event_lock);

        ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
        if (ret)
                DRM_ERROR("failed to send event.\n");

        mutex_unlock(&c_node->event_lock);

err_completion:
        if (ipp_is_m2m_cmd(c_node->property.cmd))
                complete(&c_node->start_complete);
}
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
        struct ipp_context *ctx = get_ipp_context(dev);
        struct exynos_drm_ippdrv *ippdrv;
        int ret, count = 0;

        DRM_DEBUG_KMS("%s\n", __func__);

        /* get ipp driver entry */
        list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
                ippdrv->drm_dev = drm_dev;

                ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
                        &ippdrv->ipp_id);
                if (ret) {
                        DRM_ERROR("failed to create id.\n");
                        goto err_idr;
                }

                DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
                        count++, (int)ippdrv, ippdrv->ipp_id);

                if (ippdrv->ipp_id == 0) {
                        DRM_ERROR("failed to get ipp_id[%d]\n",
                                ippdrv->ipp_id);
                        goto err_idr;
                }

                /* store parent device for node */
                ippdrv->parent_dev = dev;

                /* store event work queue and handler */
                ippdrv->event_workq = ctx->event_workq;
                ippdrv->sched_event = ipp_sched_event;
                INIT_LIST_HEAD(&ippdrv->cmd_list);

                if (is_drm_iommu_supported(drm_dev)) {
                        ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
                        if (ret) {
                                DRM_ERROR("failed to activate iommu\n");
                                goto err_iommu;
                        }
                }
        }

        return 0;

err_iommu:
        /* get ipp driver entry */
        list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
                if (is_drm_iommu_supported(drm_dev))
                        drm_iommu_detach_device(drm_dev, ippdrv->dev);

err_idr:
        idr_destroy(&ctx->ipp_idr);
        idr_destroy(&ctx->prop_idr);
        return ret;
}
static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
        struct exynos_drm_ippdrv *ippdrv;

        DRM_DEBUG_KMS("%s\n", __func__);

        /* get ipp driver entry */
        list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
                if (is_drm_iommu_supported(drm_dev))
                        drm_iommu_detach_device(drm_dev, ippdrv->dev);

                ippdrv->drm_dev = NULL;
                exynos_drm_ippdrv_unregister(ippdrv);
        }
}
static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
                struct drm_file *file)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct exynos_drm_ipp_private *priv;

        DRM_DEBUG_KMS("%s\n", __func__);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                DRM_ERROR("failed to allocate priv.\n");
                return -ENOMEM;
        }
        priv->dev = dev;
        file_priv->ipp_priv = priv;

        INIT_LIST_HEAD(&priv->event_list);

        DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);

        return 0;
}
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
                struct drm_file *file)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
        struct exynos_drm_ippdrv *ippdrv = NULL;
        struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
        int count = 0;

        DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);

        if (list_empty(&exynos_drm_ippdrv_list)) {
                DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
                goto err_clear;
        }

        list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
                if (list_empty(&ippdrv->cmd_list))
                        continue;

                list_for_each_entry_safe(c_node, tc_node,
                        &ippdrv->cmd_list, list) {
                        DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
                                __func__, count++, (int)ippdrv);

                        if (c_node->priv == priv) {
                                /*
                                 * Userland went away abnormally: the
                                 * process was killed and the file closed,
                                 * so the stop cmd ctrl was never issued.
                                 * Perform the stop operation here instead.
                                 */
                                if (c_node->state == IPP_STATE_START) {
                                        ipp_stop_property(drm_dev, ippdrv,
                                                c_node);
                                        c_node->state = IPP_STATE_STOP;
                                }

                                ippdrv->dedicated = false;
                                ipp_clean_cmd_node(c_node);
                                if (list_empty(&ippdrv->cmd_list))
                                        pm_runtime_put_sync(ippdrv->dev);
                        }
                }
        }

err_clear:
        kfree(priv);
        return;
}
static int ipp_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct ipp_context *ctx;
        struct exynos_drm_subdrv *subdrv;
        int ret;

        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        DRM_DEBUG_KMS("%s\n", __func__);

        mutex_init(&ctx->ipp_lock);
        mutex_init(&ctx->prop_lock);

        idr_init(&ctx->ipp_idr);
        idr_init(&ctx->prop_idr);

        /*
         * Create a single thread for ipp events.
         * IPP provides an event thread for the IPP drivers: a driver
         * sends its event_work to this thread, and the event thread
         * then delivers the event to the user process.
         */
        ctx->event_workq = create_singlethread_workqueue("ipp_event");
        if (!ctx->event_workq) {
                dev_err(dev, "failed to create event workqueue\n");
                return -EINVAL;
        }

        /*
         * Create a single thread for ipp commands.
         * IPP provides a command thread for the user process: the user
         * process creates a command node via the set property ioctl,
         * which builds a start_work and sends it to the command thread,
         * and the command thread then starts the property.
         */
        ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
        if (!ctx->cmd_workq) {
                dev_err(dev, "failed to create cmd workqueue\n");
                ret = -EINVAL;
                goto err_event_workq;
        }

        /* set sub driver information */
        subdrv = &ctx->subdrv;
        subdrv->dev = dev;
        subdrv->probe = ipp_subdrv_probe;
        subdrv->remove = ipp_subdrv_remove;
        subdrv->open = ipp_subdrv_open;
        subdrv->close = ipp_subdrv_close;

        platform_set_drvdata(pdev, ctx);

        ret = exynos_drm_subdrv_register(subdrv);
        if (ret < 0) {
                DRM_ERROR("failed to register drm ipp device.\n");
                goto err_cmd_workq;
        }

        dev_info(dev, "drm ipp registered successfully.\n");

        return 0;

err_cmd_workq:
        destroy_workqueue(ctx->cmd_workq);
err_event_workq:
        destroy_workqueue(ctx->event_workq);
        return ret;
}
static int ipp_remove(struct platform_device *pdev)
{
        struct ipp_context *ctx = platform_get_drvdata(pdev);

        DRM_DEBUG_KMS("%s\n", __func__);

        /* unregister sub driver */
        exynos_drm_subdrv_unregister(&ctx->subdrv);

        /* remove,destroy ipp idr */
        idr_destroy(&ctx->ipp_idr);
        idr_destroy(&ctx->prop_idr);

        mutex_destroy(&ctx->ipp_lock);
        mutex_destroy(&ctx->prop_lock);

        /* destroy command, event work queue */
        destroy_workqueue(ctx->cmd_workq);
        destroy_workqueue(ctx->event_workq);

        return 0;
}
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
        DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

        return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
        struct ipp_context *ctx = get_ipp_context(dev);

        DRM_DEBUG_KMS("%s\n", __func__);

        if (pm_runtime_suspended(dev))
                return 0;

        return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
        struct ipp_context *ctx = get_ipp_context(dev);

        DRM_DEBUG_KMS("%s\n", __func__);

        if (!pm_runtime_suspended(dev))
                return ipp_power_ctrl(ctx, true);

        return 0;
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
        struct ipp_context *ctx = get_ipp_context(dev);

        DRM_DEBUG_KMS("%s\n", __func__);

        return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
        struct ipp_context *ctx = get_ipp_context(dev);

        DRM_DEBUG_KMS("%s\n", __func__);

        return ipp_power_ctrl(ctx, true);
}
#endif
static const struct dev_pm_ops ipp_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
        SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
struct platform_driver ipp_driver = {
        .probe          = ipp_probe,
        .remove         = ipp_remove,
        .driver         = {
                .name   = "exynos-drm-ipp",
                .owner  = THIS_MODULE,
                .pm     = &ipp_pm_ops,
        },
};