Commit | Line | Data |
---|---|---|
cb471f14 EK |
1 | /* |
2 | * Copyright (C) 2012 Samsung Electronics Co.Ltd | |
3 | * Authors: | |
4 | * Eunchul Kim <chulspro.kim@samsung.com> | |
5 | * Jinyoung Jeon <jy0.jeon@samsung.com> | |
6 | * Sangmin Lee <lsmin.lee@samsung.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of the GNU General Public License as published by the | |
10 | * Free Software Foundation; either version 2 of the License, or (at your | |
11 | * option) any later version. | |
12 | * | |
13 | */ | |
14 | #include <linux/kernel.h> | |
cb471f14 EK |
15 | #include <linux/platform_device.h> |
16 | #include <linux/types.h> | |
17 | #include <linux/clk.h> | |
18 | #include <linux/pm_runtime.h> | |
cb471f14 EK |
19 | |
20 | #include <drm/drmP.h> | |
21 | #include <drm/exynos_drm.h> | |
22 | #include "exynos_drm_drv.h" | |
23 | #include "exynos_drm_gem.h" | |
24 | #include "exynos_drm_ipp.h" | |
c12e2617 | 25 | #include "exynos_drm_iommu.h" |
cb471f14 EK |
26 | |
/*
 * IPP stands for Image Post Processing and
 * supports image scaler/rotator and input/output DMA operations
 * using FIMC, GSC, Rotator, and so on.
 * IPP is an integration device driver for hardware of the same kind.
 */
33 | ||
34 | /* | |
35 | * TODO | |
36 | * 1. expand command control id. | |
37 | * 2. integrate property and config. | |
38 | * 3. removed send_event id check routine. | |
39 | * 4. compare send_event id if needed. | |
40 | * 5. free subdrv_remove notifier callback list if needed. | |
41 | * 6. need to check subdrv_open about multi-open. | |
42 | * 7. need to power_on implement power and sysmmu ctrl. | |
43 | */ | |
44 | ||
45 | #define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev)) | |
46 | #define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M) | |
47 | ||
43f41900 SWK |
48 | /* platform device pointer for ipp device. */ |
49 | static struct platform_device *exynos_drm_ipp_pdev; | |
50 | ||
cb471f14 EK |
51 | /* |
52 | * A structure of event. | |
53 | * | |
54 | * @base: base of event. | |
55 | * @event: ipp event. | |
56 | */ | |
57 | struct drm_exynos_ipp_send_event { | |
58 | struct drm_pending_event base; | |
59 | struct drm_exynos_ipp_event event; | |
60 | }; | |
61 | ||
62 | /* | |
63 | * A structure of memory node. | |
64 | * | |
65 | * @list: list head to memory queue information. | |
66 | * @ops_id: id of operations. | |
67 | * @prop_id: id of property. | |
68 | * @buf_id: id of buffer. | |
69 | * @buf_info: gem objects and dma address, size. | |
70 | * @filp: a pointer to drm_file. | |
71 | */ | |
72 | struct drm_exynos_ipp_mem_node { | |
73 | struct list_head list; | |
74 | enum drm_exynos_ops_id ops_id; | |
75 | u32 prop_id; | |
76 | u32 buf_id; | |
77 | struct drm_exynos_ipp_buf_info buf_info; | |
78 | struct drm_file *filp; | |
79 | }; | |
80 | ||
81 | /* | |
82 | * A structure of ipp context. | |
83 | * | |
84 | * @subdrv: prepare initialization using subdrv. | |
85 | * @ipp_lock: lock for synchronization of access to ipp_idr. | |
86 | * @prop_lock: lock for synchronization of access to prop_idr. | |
87 | * @ipp_idr: ipp driver idr. | |
88 | * @prop_idr: property idr. | |
89 | * @event_workq: event work queue. | |
90 | * @cmd_workq: command work queue. | |
91 | */ | |
92 | struct ipp_context { | |
93 | struct exynos_drm_subdrv subdrv; | |
94 | struct mutex ipp_lock; | |
95 | struct mutex prop_lock; | |
96 | struct idr ipp_idr; | |
97 | struct idr prop_idr; | |
98 | struct workqueue_struct *event_workq; | |
99 | struct workqueue_struct *cmd_workq; | |
100 | }; | |
101 | ||
102 | static LIST_HEAD(exynos_drm_ippdrv_list); | |
103 | static DEFINE_MUTEX(exynos_drm_ippdrv_lock); | |
104 | static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list); | |
105 | ||
43f41900 SWK |
106 | int exynos_platform_device_ipp_register(void) |
107 | { | |
108 | struct platform_device *pdev; | |
109 | ||
110 | if (exynos_drm_ipp_pdev) | |
111 | return -EEXIST; | |
112 | ||
113 | pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0); | |
114 | if (IS_ERR(pdev)) | |
115 | return PTR_ERR(pdev); | |
116 | ||
117 | exynos_drm_ipp_pdev = pdev; | |
118 | ||
119 | return 0; | |
120 | } | |
121 | ||
122 | void exynos_platform_device_ipp_unregister(void) | |
123 | { | |
124 | if (exynos_drm_ipp_pdev) { | |
125 | platform_device_unregister(exynos_drm_ipp_pdev); | |
126 | exynos_drm_ipp_pdev = NULL; | |
127 | } | |
128 | } | |
129 | ||
cb471f14 EK |
130 | int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) |
131 | { | |
cb471f14 EK |
132 | mutex_lock(&exynos_drm_ippdrv_lock); |
133 | list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list); | |
134 | mutex_unlock(&exynos_drm_ippdrv_lock); | |
135 | ||
136 | return 0; | |
137 | } | |
138 | ||
139 | int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv) | |
140 | { | |
cb471f14 EK |
141 | mutex_lock(&exynos_drm_ippdrv_lock); |
142 | list_del(&ippdrv->drv_list); | |
143 | mutex_unlock(&exynos_drm_ippdrv_lock); | |
144 | ||
145 | return 0; | |
146 | } | |
147 | ||
12ff54d2 | 148 | static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj) |
cb471f14 EK |
149 | { |
150 | int ret; | |
151 | ||
cb471f14 | 152 | mutex_lock(lock); |
8550cb2e | 153 | ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL); |
cb471f14 | 154 | mutex_unlock(lock); |
cb471f14 | 155 | |
12ff54d2 | 156 | return ret; |
cb471f14 EK |
157 | } |
158 | ||
075436b0 YC |
159 | static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id) |
160 | { | |
161 | mutex_lock(lock); | |
162 | idr_remove(id_idr, id); | |
163 | mutex_unlock(lock); | |
164 | } | |
165 | ||
cb471f14 EK |
166 | static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id) |
167 | { | |
168 | void *obj; | |
169 | ||
cb471f14 | 170 | mutex_lock(lock); |
cb471f14 | 171 | obj = idr_find(id_idr, id); |
cb471f14 EK |
172 | mutex_unlock(lock); |
173 | ||
174 | return obj; | |
175 | } | |
176 | ||
9cc7d85e AH |
/*
 * Check whether @ippdrv can take @property right now.
 *
 * Returns 0 when usable, -EBUSY when the driver is already dedicated to
 * a client or a non-M2M command is requested while the hardware is not
 * runtime-suspended (i.e. still active), and -EINVAL when the driver's
 * own check_property() callback rejects the property.
 */
static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
				!pm_runtime_suspended(ippdrv->dev)))
		return -EBUSY;

	/* the driver may veto properties it cannot handle, if it cares */
	if (ippdrv->check_property &&
	    ippdrv->check_property(ippdrv->dev, property))
		return -EINVAL;

	return 0;
}
190 | ||
/*
 * Select an ipp driver for @property: either the explicitly requested
 * one (property->ipp_id != 0, looked up in the idr) or the first entry
 * in the global driver list that passes ipp_check_driver().
 * Returns the driver or ERR_PTR(-ENODEV / check error).
 *
 * NOTE(review): the list walk below does not take exynos_drm_ippdrv_lock;
 * presumably all drivers register before ioctls arrive - confirm.
 */
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;
	int ret;

	if (ipp_id) {
		/* user asked for a specific driver: look it up by id */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
		if (!ippdrv) {
			DRM_DEBUG("ipp%d driver not found\n", ipp_id);
			return ERR_PTR(-ENODEV);
		}

		/* it must be free and accept the requested property */
		ret = ipp_check_driver(ippdrv, property);
		if (ret < 0) {
			DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
			return ERR_PTR(ret);
		}

		return ippdrv;
	} else {
		/* no id given: take the first driver that passes the check */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			ret = ipp_check_driver(ippdrv, property);
			if (ret == 0)
				return ippdrv;
		}

		DRM_DEBUG("cannot find driver suitable for given property.\n");
	}

	return ERR_PTR(-ENODEV);
}
224 | ||
225 | static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id) | |
226 | { | |
227 | struct exynos_drm_ippdrv *ippdrv; | |
228 | struct drm_exynos_ipp_cmd_node *c_node; | |
229 | int count = 0; | |
230 | ||
cbc4c33d | 231 | DRM_DEBUG_KMS("prop_id[%d]\n", prop_id); |
cb471f14 | 232 | |
cb471f14 EK |
233 | /* |
234 | * This case is search ipp driver by prop_id handle. | |
235 | * sometimes, ipp subsystem find driver by prop_id. | |
9fca9acf | 236 | * e.g PAUSE state, queue buf, command control. |
cb471f14 EK |
237 | */ |
238 | list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { | |
cbc4c33d | 239 | DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); |
cb471f14 | 240 | |
7f5af059 YC |
241 | mutex_lock(&ippdrv->cmd_lock); |
242 | list_for_each_entry(c_node, &ippdrv->cmd_list, list) { | |
243 | if (c_node->property.prop_id == prop_id) { | |
244 | mutex_unlock(&ippdrv->cmd_lock); | |
c66ce40b | 245 | return ippdrv; |
7f5af059 | 246 | } |
cb471f14 | 247 | } |
7f5af059 | 248 | mutex_unlock(&ippdrv->cmd_lock); |
cb471f14 EK |
249 | } |
250 | ||
251 | return ERR_PTR(-ENODEV); | |
252 | } | |
253 | ||
/*
 * DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY handler.
 *
 * With ipp_id == 0 it reports how many ipp drivers are registered; with
 * a non-zero ipp_id it copies that driver's capability list into @data,
 * so userspace first queries the count and then probes each driver.
 */
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		/* NOTE(review): walked without exynos_drm_ippdrv_lock - confirm */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * Supports ippdrv list count for user application.
		 * First step user application getting ippdrv count.
		 * and second step getting ippdrv capability using ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Getting ippdrv capability by ipp_id.
		 * some device not supported wb, output interface.
		 * so, user application detect correct ipp driver
		 * using this ioctl.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -ENODEV;
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}
306 | ||
307 | static void ipp_print_property(struct drm_exynos_ipp_property *property, | |
308 | int idx) | |
309 | { | |
310 | struct drm_exynos_ipp_config *config = &property->config[idx]; | |
311 | struct drm_exynos_pos *pos = &config->pos; | |
312 | struct drm_exynos_sz *sz = &config->sz; | |
313 | ||
cbc4c33d YC |
314 | DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n", |
315 | property->prop_id, idx ? "dst" : "src", config->fmt); | |
cb471f14 | 316 | |
cbc4c33d YC |
317 | DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n", |
318 | pos->x, pos->y, pos->w, pos->h, | |
cb471f14 EK |
319 | sz->hsize, sz->vsize, config->flip, config->degree); |
320 | } | |
321 | ||
322 | static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property) | |
323 | { | |
324 | struct exynos_drm_ippdrv *ippdrv; | |
325 | struct drm_exynos_ipp_cmd_node *c_node; | |
326 | u32 prop_id = property->prop_id; | |
327 | ||
cbc4c33d | 328 | DRM_DEBUG_KMS("prop_id[%d]\n", prop_id); |
cb471f14 EK |
329 | |
330 | ippdrv = ipp_find_drv_by_handle(prop_id); | |
f0250458 | 331 | if (IS_ERR(ippdrv)) { |
cb471f14 EK |
332 | DRM_ERROR("failed to get ipp driver.\n"); |
333 | return -EINVAL; | |
334 | } | |
335 | ||
336 | /* | |
337 | * Find command node using command list in ippdrv. | |
338 | * when we find this command no using prop_id. | |
339 | * return property information set in this command node. | |
340 | */ | |
7f5af059 | 341 | mutex_lock(&ippdrv->cmd_lock); |
cb471f14 EK |
342 | list_for_each_entry(c_node, &ippdrv->cmd_list, list) { |
343 | if ((c_node->property.prop_id == prop_id) && | |
344 | (c_node->state == IPP_STATE_STOP)) { | |
7f5af059 | 345 | mutex_unlock(&ippdrv->cmd_lock); |
cbc4c33d YC |
346 | DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n", |
347 | property->cmd, (int)ippdrv); | |
cb471f14 EK |
348 | |
349 | c_node->property = *property; | |
350 | return 0; | |
351 | } | |
352 | } | |
7f5af059 | 353 | mutex_unlock(&ippdrv->cmd_lock); |
cb471f14 EK |
354 | |
355 | DRM_ERROR("failed to search property.\n"); | |
356 | ||
357 | return -EINVAL; | |
358 | } | |
359 | ||
360 | static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void) | |
361 | { | |
362 | struct drm_exynos_ipp_cmd_work *cmd_work; | |
363 | ||
cb471f14 | 364 | cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); |
38bb5253 | 365 | if (!cmd_work) |
cb471f14 | 366 | return ERR_PTR(-ENOMEM); |
cb471f14 EK |
367 | |
368 | INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd); | |
369 | ||
370 | return cmd_work; | |
371 | } | |
372 | ||
373 | static struct drm_exynos_ipp_event_work *ipp_create_event_work(void) | |
374 | { | |
375 | struct drm_exynos_ipp_event_work *event_work; | |
376 | ||
cb471f14 | 377 | event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); |
38bb5253 | 378 | if (!event_work) |
cb471f14 | 379 | return ERR_PTR(-ENOMEM); |
cb471f14 | 380 | |
60b61c2f | 381 | INIT_WORK(&event_work->work, ipp_sched_event); |
cb471f14 EK |
382 | |
383 | return event_work; | |
384 | } | |
385 | ||
386 | int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, | |
387 | struct drm_file *file) | |
388 | { | |
389 | struct drm_exynos_file_private *file_priv = file->driver_priv; | |
5c76c5b1 | 390 | struct device *dev = file_priv->ipp_dev; |
cb471f14 EK |
391 | struct ipp_context *ctx = get_ipp_context(dev); |
392 | struct drm_exynos_ipp_property *property = data; | |
393 | struct exynos_drm_ippdrv *ippdrv; | |
394 | struct drm_exynos_ipp_cmd_node *c_node; | |
395 | int ret, i; | |
396 | ||
cb471f14 EK |
397 | if (!ctx) { |
398 | DRM_ERROR("invalid context.\n"); | |
399 | return -EINVAL; | |
400 | } | |
401 | ||
402 | if (!property) { | |
403 | DRM_ERROR("invalid property parameter.\n"); | |
404 | return -EINVAL; | |
405 | } | |
406 | ||
407 | /* | |
408 | * This is log print for user application property. | |
409 | * user application set various property. | |
410 | */ | |
411 | for_each_ipp_ops(i) | |
412 | ipp_print_property(property, i); | |
413 | ||
414 | /* | |
415 | * set property ioctl generated new prop_id. | |
416 | * but in this case already asigned prop_id using old set property. | |
417 | * e.g PAUSE state. this case supports find current prop_id and use it | |
418 | * instead of allocation. | |
419 | */ | |
420 | if (property->prop_id) { | |
cbc4c33d | 421 | DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); |
cb471f14 EK |
422 | return ipp_find_and_set_property(property); |
423 | } | |
424 | ||
425 | /* find ipp driver using ipp id */ | |
426 | ippdrv = ipp_find_driver(ctx, property); | |
f0250458 | 427 | if (IS_ERR(ippdrv)) { |
cb471f14 EK |
428 | DRM_ERROR("failed to get ipp driver.\n"); |
429 | return -EINVAL; | |
430 | } | |
431 | ||
432 | /* allocate command node */ | |
433 | c_node = kzalloc(sizeof(*c_node), GFP_KERNEL); | |
38bb5253 | 434 | if (!c_node) |
cb471f14 | 435 | return -ENOMEM; |
cb471f14 | 436 | |
12ff54d2 AH |
437 | ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node); |
438 | if (ret < 0) { | |
cb471f14 EK |
439 | DRM_ERROR("failed to create id.\n"); |
440 | goto err_clear; | |
441 | } | |
12ff54d2 | 442 | property->prop_id = ret; |
cb471f14 | 443 | |
cbc4c33d YC |
444 | DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", |
445 | property->prop_id, property->cmd, (int)ippdrv); | |
cb471f14 EK |
446 | |
447 | /* stored property information and ippdrv in private data */ | |
5c76c5b1 | 448 | c_node->dev = dev; |
cb471f14 EK |
449 | c_node->property = *property; |
450 | c_node->state = IPP_STATE_IDLE; | |
451 | ||
452 | c_node->start_work = ipp_create_cmd_work(); | |
f0250458 | 453 | if (IS_ERR(c_node->start_work)) { |
cb471f14 | 454 | DRM_ERROR("failed to create start work.\n"); |
075436b0 | 455 | goto err_remove_id; |
cb471f14 EK |
456 | } |
457 | ||
458 | c_node->stop_work = ipp_create_cmd_work(); | |
f0250458 | 459 | if (IS_ERR(c_node->stop_work)) { |
cb471f14 EK |
460 | DRM_ERROR("failed to create stop work.\n"); |
461 | goto err_free_start; | |
462 | } | |
463 | ||
464 | c_node->event_work = ipp_create_event_work(); | |
f0250458 | 465 | if (IS_ERR(c_node->event_work)) { |
cb471f14 EK |
466 | DRM_ERROR("failed to create event work.\n"); |
467 | goto err_free_stop; | |
468 | } | |
469 | ||
4e4fe554 | 470 | mutex_init(&c_node->lock); |
cb471f14 EK |
471 | mutex_init(&c_node->mem_lock); |
472 | mutex_init(&c_node->event_lock); | |
473 | ||
474 | init_completion(&c_node->start_complete); | |
475 | init_completion(&c_node->stop_complete); | |
476 | ||
477 | for_each_ipp_ops(i) | |
478 | INIT_LIST_HEAD(&c_node->mem_list[i]); | |
479 | ||
480 | INIT_LIST_HEAD(&c_node->event_list); | |
7f5af059 | 481 | mutex_lock(&ippdrv->cmd_lock); |
cb471f14 | 482 | list_add_tail(&c_node->list, &ippdrv->cmd_list); |
7f5af059 | 483 | mutex_unlock(&ippdrv->cmd_lock); |
cb471f14 EK |
484 | |
485 | /* make dedicated state without m2m */ | |
486 | if (!ipp_is_m2m_cmd(property->cmd)) | |
487 | ippdrv->dedicated = true; | |
488 | ||
489 | return 0; | |
490 | ||
491 | err_free_stop: | |
492 | kfree(c_node->stop_work); | |
493 | err_free_start: | |
494 | kfree(c_node->start_work); | |
075436b0 YC |
495 | err_remove_id: |
496 | ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id); | |
cb471f14 EK |
497 | err_clear: |
498 | kfree(c_node); | |
499 | return ret; | |
500 | } | |
501 | ||
075436b0 YC |
/*
 * Tear down a command node: flush its pending works, unlink it from the
 * owning driver's cmd_list, release its idr id, and free all node-owned
 * allocations.
 *
 * NOTE(review): list_del() is done without ippdrv->cmd_lock here -
 * presumably the caller holds it or the node is no longer reachable;
 * confirm against the callers.
 */
static void ipp_clean_cmd_node(struct ipp_context *ctx,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	/* cancel works so no handler still references c_node */
	cancel_work_sync(&c_node->start_work->work);
	cancel_work_sync(&c_node->stop_work->work);
	cancel_work_sync(&c_node->event_work->work);

	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}
527 | ||
fb5ee01c | 528 | static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node) |
cb471f14 | 529 | { |
fb5ee01c AH |
530 | switch (c_node->property.cmd) { |
531 | case IPP_CMD_WB: | |
532 | return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]); | |
533 | case IPP_CMD_OUTPUT: | |
534 | return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]); | |
535 | case IPP_CMD_M2M: | |
536 | default: | |
537 | return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) && | |
538 | !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]); | |
cb471f14 | 539 | } |
cb471f14 EK |
540 | } |
541 | ||
542 | static struct drm_exynos_ipp_mem_node | |
543 | *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node, | |
544 | struct drm_exynos_ipp_queue_buf *qbuf) | |
545 | { | |
546 | struct drm_exynos_ipp_mem_node *m_node; | |
547 | struct list_head *head; | |
548 | int count = 0; | |
549 | ||
cbc4c33d | 550 | DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id); |
cb471f14 EK |
551 | |
552 | /* source/destination memory list */ | |
553 | head = &c_node->mem_list[qbuf->ops_id]; | |
554 | ||
555 | /* find memory node from memory list */ | |
556 | list_for_each_entry(m_node, head, list) { | |
cbc4c33d | 557 | DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); |
cb471f14 EK |
558 | |
559 | /* compare buffer id */ | |
560 | if (m_node->buf_id == qbuf->buf_id) | |
561 | return m_node; | |
562 | } | |
563 | ||
564 | return NULL; | |
565 | } | |
566 | ||
567 | static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, | |
568 | struct drm_exynos_ipp_cmd_node *c_node, | |
569 | struct drm_exynos_ipp_mem_node *m_node) | |
570 | { | |
571 | struct exynos_drm_ipp_ops *ops = NULL; | |
572 | int ret = 0; | |
573 | ||
cbc4c33d | 574 | DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); |
cb471f14 EK |
575 | |
576 | if (!m_node) { | |
577 | DRM_ERROR("invalid queue node.\n"); | |
578 | return -EFAULT; | |
579 | } | |
580 | ||
cbc4c33d | 581 | DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); |
cb471f14 EK |
582 | |
583 | /* get operations callback */ | |
584 | ops = ippdrv->ops[m_node->ops_id]; | |
585 | if (!ops) { | |
586 | DRM_ERROR("not support ops.\n"); | |
220db6fe | 587 | return -EFAULT; |
cb471f14 EK |
588 | } |
589 | ||
590 | /* set address and enable irq */ | |
591 | if (ops->set_addr) { | |
592 | ret = ops->set_addr(ippdrv->dev, &m_node->buf_info, | |
593 | m_node->buf_id, IPP_BUF_ENQUEUE); | |
594 | if (ret) { | |
595 | DRM_ERROR("failed to set addr.\n"); | |
220db6fe | 596 | return ret; |
cb471f14 EK |
597 | } |
598 | } | |
599 | ||
cb471f14 EK |
600 | return ret; |
601 | } | |
602 | ||
603 | static struct drm_exynos_ipp_mem_node | |
604 | *ipp_get_mem_node(struct drm_device *drm_dev, | |
605 | struct drm_file *file, | |
606 | struct drm_exynos_ipp_cmd_node *c_node, | |
607 | struct drm_exynos_ipp_queue_buf *qbuf) | |
608 | { | |
609 | struct drm_exynos_ipp_mem_node *m_node; | |
73b00232 | 610 | struct drm_exynos_ipp_buf_info *buf_info; |
cb471f14 EK |
611 | int i; |
612 | ||
cb471f14 | 613 | m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); |
38bb5253 | 614 | if (!m_node) |
220db6fe | 615 | return ERR_PTR(-ENOMEM); |
cb471f14 | 616 | |
73b00232 | 617 | buf_info = &m_node->buf_info; |
cb471f14 EK |
618 | |
619 | /* operations, buffer id */ | |
620 | m_node->ops_id = qbuf->ops_id; | |
621 | m_node->prop_id = qbuf->prop_id; | |
622 | m_node->buf_id = qbuf->buf_id; | |
623 | ||
cbc4c33d YC |
624 | DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); |
625 | DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); | |
cb471f14 EK |
626 | |
627 | for_each_ipp_planar(i) { | |
cbc4c33d | 628 | DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]); |
cb471f14 EK |
629 | |
630 | /* get dma address by handle */ | |
631 | if (qbuf->handle[i]) { | |
a8ea17f6 AH |
632 | dma_addr_t *addr; |
633 | ||
cb471f14 EK |
634 | addr = exynos_drm_gem_get_dma_addr(drm_dev, |
635 | qbuf->handle[i], file); | |
636 | if (IS_ERR(addr)) { | |
637 | DRM_ERROR("failed to get addr.\n"); | |
638 | goto err_clear; | |
639 | } | |
640 | ||
73b00232 AH |
641 | buf_info->handles[i] = qbuf->handle[i]; |
642 | buf_info->base[i] = *addr; | |
643 | DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i, | |
644 | buf_info->base[i], buf_info->handles[i]); | |
cb471f14 EK |
645 | } |
646 | } | |
647 | ||
648 | m_node->filp = file; | |
220db6fe | 649 | mutex_lock(&c_node->mem_lock); |
cb471f14 | 650 | list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); |
cb471f14 | 651 | mutex_unlock(&c_node->mem_lock); |
220db6fe | 652 | |
cb471f14 EK |
653 | return m_node; |
654 | ||
655 | err_clear: | |
656 | kfree(m_node); | |
cb471f14 EK |
657 | return ERR_PTR(-EFAULT); |
658 | } | |
659 | ||
660 | static int ipp_put_mem_node(struct drm_device *drm_dev, | |
661 | struct drm_exynos_ipp_cmd_node *c_node, | |
662 | struct drm_exynos_ipp_mem_node *m_node) | |
663 | { | |
664 | int i; | |
665 | ||
cbc4c33d | 666 | DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); |
cb471f14 EK |
667 | |
668 | if (!m_node) { | |
669 | DRM_ERROR("invalid dequeue node.\n"); | |
670 | return -EFAULT; | |
671 | } | |
672 | ||
cbc4c33d | 673 | DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); |
cb471f14 EK |
674 | |
675 | /* put gem buffer */ | |
676 | for_each_ipp_planar(i) { | |
677 | unsigned long handle = m_node->buf_info.handles[i]; | |
678 | if (handle) | |
679 | exynos_drm_gem_put_dma_addr(drm_dev, handle, | |
680 | m_node->filp); | |
681 | } | |
682 | ||
683 | /* delete list in queue */ | |
684 | list_del(&m_node->list); | |
685 | kfree(m_node); | |
686 | ||
cb471f14 EK |
687 | return 0; |
688 | } | |
689 | ||
/* drm_pending_event destroy callback: events are plain kzalloc'd objects. */
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
694 | ||
/*
 * Allocate a completion event for a destination buffer and queue it on
 * the command node's event list; it is delivered to userspace when the
 * operation finishes.
 *
 * NOTE(review): on allocation failure this refunds sizeof(e->event) to
 * file->event_space - presumably that space was reserved elsewhere
 * before this call; confirm against the event dispatch path.
 */
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}
728 | ||
729 | static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, | |
730 | struct drm_exynos_ipp_queue_buf *qbuf) | |
731 | { | |
732 | struct drm_exynos_ipp_send_event *e, *te; | |
733 | int count = 0; | |
734 | ||
4d520767 | 735 | mutex_lock(&c_node->event_lock); |
cb471f14 | 736 | list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { |
cbc4c33d | 737 | DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); |
cb471f14 EK |
738 | |
739 | /* | |
4fe25b82 | 740 | * qbuf == NULL condition means all event deletion. |
cb471f14 EK |
741 | * stop operations want to delete all event list. |
742 | * another case delete only same buf id. | |
743 | */ | |
744 | if (!qbuf) { | |
745 | /* delete list */ | |
746 | list_del(&e->base.link); | |
747 | kfree(e); | |
748 | } | |
749 | ||
750 | /* compare buffer id */ | |
751 | if (qbuf && (qbuf->buf_id == | |
752 | e->event.buf_id[EXYNOS_DRM_OPS_DST])) { | |
753 | /* delete list */ | |
754 | list_del(&e->base.link); | |
755 | kfree(e); | |
4d520767 | 756 | goto out_unlock; |
cb471f14 EK |
757 | } |
758 | } | |
4d520767 YC |
759 | |
760 | out_unlock: | |
761 | mutex_unlock(&c_node->event_lock); | |
762 | return; | |
cb471f14 EK |
763 | } |
764 | ||
0bc4a0aa | 765 | static void ipp_handle_cmd_work(struct device *dev, |
cb471f14 EK |
766 | struct exynos_drm_ippdrv *ippdrv, |
767 | struct drm_exynos_ipp_cmd_work *cmd_work, | |
768 | struct drm_exynos_ipp_cmd_node *c_node) | |
769 | { | |
770 | struct ipp_context *ctx = get_ipp_context(dev); | |
771 | ||
772 | cmd_work->ippdrv = ippdrv; | |
773 | cmd_work->c_node = c_node; | |
774 | queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work); | |
775 | } | |
776 | ||
/*
 * Enqueue a buffer on an already-started command. For M2M commands the
 * start work is kicked so the operation runs with the new buffer; for
 * WB/OUTPUT the buffer address is programmed into the hardware
 * immediately. A node that is not in START state is silently bypassed.
 */
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	/* nothing to run until the command has been started */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	/* mem_lock held across the check and the set/kick below */
	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If set destination buffer and enabled clock,
	 * then m2m operations need start operations at queue_buf
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}
834 | ||
835 | static void ipp_clean_queue_buf(struct drm_device *drm_dev, | |
836 | struct drm_exynos_ipp_cmd_node *c_node, | |
837 | struct drm_exynos_ipp_queue_buf *qbuf) | |
838 | { | |
839 | struct drm_exynos_ipp_mem_node *m_node, *tm_node; | |
840 | ||
c66ce40b | 841 | /* delete list */ |
220db6fe | 842 | mutex_lock(&c_node->mem_lock); |
c66ce40b YC |
843 | list_for_each_entry_safe(m_node, tm_node, |
844 | &c_node->mem_list[qbuf->ops_id], list) { | |
845 | if (m_node->buf_id == qbuf->buf_id && | |
846 | m_node->ops_id == qbuf->ops_id) | |
847 | ipp_put_mem_node(drm_dev, c_node, m_node); | |
cb471f14 | 848 | } |
220db6fe | 849 | mutex_unlock(&c_node->mem_lock); |
cb471f14 EK |
850 | } |
851 | ||
/*
 * DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF handler.
 *
 * ENQUEUE: resolve the GEM handles into a memory node; for destination
 * buffers also allocate a completion event and (for M2M) kick the run.
 * DEQUEUE: drop the buffer's pending event and memory node.
 */
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	/* ops_id indexes mem_list[]; reject out-of-range values */
	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * first step get event for destination buffer.
		 * and second step when M2M case run with destination buffer
		 * if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * M2M case run play control for streaming feature.
			 * other case set address and waiting.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
943 | ||
944 | static bool exynos_drm_ipp_check_valid(struct device *dev, | |
945 | enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state) | |
946 | { | |
cb471f14 EK |
947 | if (ctrl != IPP_CTRL_PLAY) { |
948 | if (pm_runtime_suspended(dev)) { | |
949 | DRM_ERROR("pm:runtime_suspended.\n"); | |
950 | goto err_status; | |
951 | } | |
952 | } | |
953 | ||
954 | switch (ctrl) { | |
955 | case IPP_CTRL_PLAY: | |
956 | if (state != IPP_STATE_IDLE) | |
957 | goto err_status; | |
958 | break; | |
959 | case IPP_CTRL_STOP: | |
960 | if (state == IPP_STATE_STOP) | |
961 | goto err_status; | |
962 | break; | |
963 | case IPP_CTRL_PAUSE: | |
964 | if (state != IPP_STATE_START) | |
965 | goto err_status; | |
966 | break; | |
967 | case IPP_CTRL_RESUME: | |
968 | if (state != IPP_STATE_STOP) | |
969 | goto err_status; | |
970 | break; | |
971 | default: | |
972 | DRM_ERROR("invalid state.\n"); | |
973 | goto err_status; | |
cb471f14 EK |
974 | } |
975 | ||
976 | return true; | |
977 | ||
978 | err_status: | |
979 | DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state); | |
980 | return false; | |
981 | } | |
982 | ||
/*
 * DRM_IOCTL_EXYNOS_IPP_CMD_CTRL handler: play/stop/pause/resume a
 * previously configured command node.
 *
 * PLAY/RESUME queue start_work to the command thread. STOP/PAUSE queue
 * stop_work and wait (with timeout) for the command thread to signal
 * stop_complete; STOP additionally releases the command node and drops
 * the runtime PM reference when the driver has no remaining commands.
 *
 * Returns 0 on success or a negative errno.
 */
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	/* reject transitions that are invalid for the current state */
	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		/* power the block up before starting */
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		/* wait for the command thread; continue even on timeout */
		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		/* drop the runtime PM reference once no commands remain */
		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
1086 | ||
1087 | int exynos_drm_ippnb_register(struct notifier_block *nb) | |
1088 | { | |
1089 | return blocking_notifier_chain_register( | |
1090 | &exynos_drm_ippnb_list, nb); | |
1091 | } | |
1092 | ||
1093 | int exynos_drm_ippnb_unregister(struct notifier_block *nb) | |
1094 | { | |
1095 | return blocking_notifier_chain_unregister( | |
1096 | &exynos_drm_ippnb_list, nb); | |
1097 | } | |
1098 | ||
1099 | int exynos_drm_ippnb_send_event(unsigned long val, void *v) | |
1100 | { | |
1101 | return blocking_notifier_call_chain( | |
1102 | &exynos_drm_ippnb_list, val, v); | |
1103 | } | |
1104 | ||
1105 | static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv, | |
1106 | struct drm_exynos_ipp_property *property) | |
1107 | { | |
1108 | struct exynos_drm_ipp_ops *ops = NULL; | |
1109 | bool swap = false; | |
1110 | int ret, i; | |
1111 | ||
1112 | if (!property) { | |
1113 | DRM_ERROR("invalid property parameter.\n"); | |
1114 | return -EINVAL; | |
1115 | } | |
1116 | ||
cbc4c33d | 1117 | DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); |
cb471f14 EK |
1118 | |
1119 | /* reset h/w block */ | |
1120 | if (ippdrv->reset && | |
1121 | ippdrv->reset(ippdrv->dev)) { | |
cb471f14 EK |
1122 | return -EINVAL; |
1123 | } | |
1124 | ||
1125 | /* set source,destination operations */ | |
1126 | for_each_ipp_ops(i) { | |
1127 | struct drm_exynos_ipp_config *config = | |
1128 | &property->config[i]; | |
1129 | ||
1130 | ops = ippdrv->ops[i]; | |
1131 | if (!ops || !config) { | |
1132 | DRM_ERROR("not support ops and config.\n"); | |
1133 | return -EINVAL; | |
1134 | } | |
1135 | ||
1136 | /* set format */ | |
1137 | if (ops->set_fmt) { | |
1138 | ret = ops->set_fmt(ippdrv->dev, config->fmt); | |
57ace335 | 1139 | if (ret) |
cb471f14 | 1140 | return ret; |
cb471f14 EK |
1141 | } |
1142 | ||
1143 | /* set transform for rotation, flip */ | |
1144 | if (ops->set_transf) { | |
1145 | ret = ops->set_transf(ippdrv->dev, config->degree, | |
1146 | config->flip, &swap); | |
57ace335 AH |
1147 | if (ret) |
1148 | return ret; | |
cb471f14 EK |
1149 | } |
1150 | ||
1151 | /* set size */ | |
1152 | if (ops->set_size) { | |
1153 | ret = ops->set_size(ippdrv->dev, swap, &config->pos, | |
1154 | &config->sz); | |
57ace335 | 1155 | if (ret) |
cb471f14 | 1156 | return ret; |
cb471f14 EK |
1157 | } |
1158 | } | |
1159 | ||
1160 | return 0; | |
1161 | } | |
1162 | ||
/*
 * Program the h/w with the command node's property, queue the initial
 * memory node(s) and start the operation.
 *
 * Called from the command worker with c_node->lock held (see
 * ipp_sched_cmd()). On failure ippdrv->c_node is cleared again.
 *
 * Returns 0 on success or a negative errno.
 */
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			/* M2M consumes one buffer per side at a time */
			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}
1260 | ||
1261 | static int ipp_stop_property(struct drm_device *drm_dev, | |
1262 | struct exynos_drm_ippdrv *ippdrv, | |
1263 | struct drm_exynos_ipp_cmd_node *c_node) | |
1264 | { | |
1265 | struct drm_exynos_ipp_mem_node *m_node, *tm_node; | |
1266 | struct drm_exynos_ipp_property *property = &c_node->property; | |
1267 | struct list_head *head; | |
1268 | int ret = 0, i; | |
1269 | ||
cbc4c33d | 1270 | DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); |
cb471f14 EK |
1271 | |
1272 | /* put event */ | |
1273 | ipp_put_event(c_node, NULL); | |
1274 | ||
220db6fe YC |
1275 | mutex_lock(&c_node->mem_lock); |
1276 | ||
cb471f14 EK |
1277 | /* check command */ |
1278 | switch (property->cmd) { | |
1279 | case IPP_CMD_M2M: | |
1280 | for_each_ipp_ops(i) { | |
1281 | /* source/destination memory list */ | |
1282 | head = &c_node->mem_list[i]; | |
1283 | ||
cb471f14 EK |
1284 | list_for_each_entry_safe(m_node, tm_node, |
1285 | head, list) { | |
1286 | ret = ipp_put_mem_node(drm_dev, c_node, | |
1287 | m_node); | |
1288 | if (ret) { | |
1289 | DRM_ERROR("failed to put m_node.\n"); | |
1290 | goto err_clear; | |
1291 | } | |
1292 | } | |
1293 | } | |
1294 | break; | |
1295 | case IPP_CMD_WB: | |
1296 | /* destination memory list */ | |
1297 | head = &c_node->mem_list[EXYNOS_DRM_OPS_DST]; | |
1298 | ||
cb471f14 EK |
1299 | list_for_each_entry_safe(m_node, tm_node, head, list) { |
1300 | ret = ipp_put_mem_node(drm_dev, c_node, m_node); | |
1301 | if (ret) { | |
1302 | DRM_ERROR("failed to put m_node.\n"); | |
1303 | goto err_clear; | |
1304 | } | |
1305 | } | |
1306 | break; | |
1307 | case IPP_CMD_OUTPUT: | |
1308 | /* source memory list */ | |
1309 | head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; | |
1310 | ||
cb471f14 EK |
1311 | list_for_each_entry_safe(m_node, tm_node, head, list) { |
1312 | ret = ipp_put_mem_node(drm_dev, c_node, m_node); | |
1313 | if (ret) { | |
1314 | DRM_ERROR("failed to put m_node.\n"); | |
1315 | goto err_clear; | |
1316 | } | |
1317 | } | |
1318 | break; | |
1319 | default: | |
1320 | DRM_ERROR("invalid operations.\n"); | |
1321 | ret = -EINVAL; | |
1322 | goto err_clear; | |
1323 | } | |
1324 | ||
1325 | err_clear: | |
220db6fe YC |
1326 | mutex_unlock(&c_node->mem_lock); |
1327 | ||
cb471f14 EK |
1328 | /* stop operations */ |
1329 | if (ippdrv->stop) | |
1330 | ippdrv->stop(ippdrv->dev, property->cmd); | |
1331 | ||
1332 | return ret; | |
1333 | } | |
1334 | ||
/*
 * Command workqueue handler: executes queued start/stop/pause/resume
 * requests for a command node under c_node->lock.
 *
 * For an M2M start this also waits (with timeout) for the transfer to
 * complete; for stop/pause it signals stop_complete so the ioctl path
 * waiting in exynos_drm_ipp_cmd_ctrl() can continue.
 */
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * M2M case supports wait_completion of transfer.
		 * because M2M case supports single unit operation
		 * with multiple queue.
		 * M2M need to wait completion of data transfer.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		/* release the waiter in exynos_drm_ipp_cmd_ctrl() */
		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}
1406 | ||
/*
 * Deliver a buffer-done event to userspace for the given buf_id(s).
 *
 * Pops the finished memory node(s) according to the command type, fills
 * the first queued drm event with timestamp/prop_id/buf_ids and moves
 * it onto the file's event list, waking any poll() waiter.
 *
 * Takes c_node->event_lock, then c_node->mem_lock (in that order).
 * Returns 0 on success (including when there is nothing to send) or a
 * negative errno.
 */
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	mutex_lock(&c_node->event_lock);
	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		ret = 0;
		goto err_event_unlock;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = 0;
		goto err_mem_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			ret = -ENOMEM;
			goto err_mem_unlock;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_mem_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	/* mismatch is only logged; the event is delivered regardless */
	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * command node have event list of destination buffer
	 * If destination buffer enqueue to mem list,
	 * then we make event and link to event list tail.
	 * so, we get first event for first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
	mutex_unlock(&c_node->event_lock);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;

err_mem_unlock:
	mutex_unlock(&c_node->mem_lock);
err_event_unlock:
	mutex_unlock(&c_node->event_lock);
	return ret;
}
1547 | ||
1548 | void ipp_sched_event(struct work_struct *work) | |
1549 | { | |
1550 | struct drm_exynos_ipp_event_work *event_work = | |
1551 | (struct drm_exynos_ipp_event_work *)work; | |
1552 | struct exynos_drm_ippdrv *ippdrv; | |
1553 | struct drm_exynos_ipp_cmd_node *c_node; | |
1554 | int ret; | |
1555 | ||
1556 | if (!event_work) { | |
1557 | DRM_ERROR("failed to get event_work.\n"); | |
1558 | return; | |
1559 | } | |
1560 | ||
cbc4c33d | 1561 | DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]); |
cb471f14 EK |
1562 | |
1563 | ippdrv = event_work->ippdrv; | |
1564 | if (!ippdrv) { | |
1565 | DRM_ERROR("failed to get ipp driver.\n"); | |
1566 | return; | |
1567 | } | |
1568 | ||
7259c3d6 | 1569 | c_node = ippdrv->c_node; |
cb471f14 EK |
1570 | if (!c_node) { |
1571 | DRM_ERROR("failed to get command node.\n"); | |
1572 | return; | |
1573 | } | |
1574 | ||
1575 | /* | |
1576 | * IPP supports command thread, event thread synchronization. | |
1577 | * If IPP close immediately from user land, then IPP make | |
1578 | * synchronization with command thread, so make complete event. | |
1579 | * or going out operations. | |
1580 | */ | |
1581 | if (c_node->state != IPP_STATE_START) { | |
cbc4c33d YC |
1582 | DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n", |
1583 | c_node->state, c_node->property.prop_id); | |
cb471f14 EK |
1584 | goto err_completion; |
1585 | } | |
1586 | ||
cb471f14 EK |
1587 | ret = ipp_send_event(ippdrv, c_node, event_work->buf_id); |
1588 | if (ret) { | |
1589 | DRM_ERROR("failed to send event.\n"); | |
1590 | goto err_completion; | |
1591 | } | |
1592 | ||
1593 | err_completion: | |
1594 | if (ipp_is_m2m_cmd(c_node->property.cmd)) | |
1595 | complete(&c_node->start_complete); | |
cb471f14 EK |
1596 | } |
1597 | ||
/*
 * exynos drm subdrv probe: bind every registered ipp driver to the drm
 * device - allocate its ipp_id, hook up the event workqueue/handler and
 * attach the iommu when supported.
 *
 * On failure, drivers initialised so far are unwound in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		/* ipp_create_id() returns the new id on success */
		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
		if (ret < 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}
		ippdrv->prop_list.ipp_id = ret;

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ret);

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* unwind the drivers already set up, newest first */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
		drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
			ippdrv->prop_list.ipp_id);
	}

	return ret;
}
1651 | ||
1652 | static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev) | |
1653 | { | |
1654 | struct exynos_drm_ippdrv *ippdrv; | |
075436b0 | 1655 | struct ipp_context *ctx = get_ipp_context(dev); |
cb471f14 | 1656 | |
cb471f14 EK |
1657 | /* get ipp driver entry */ |
1658 | list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { | |
c12e2617 EK |
1659 | if (is_drm_iommu_supported(drm_dev)) |
1660 | drm_iommu_detach_device(drm_dev, ippdrv->dev); | |
1661 | ||
075436b0 YC |
1662 | ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, |
1663 | ippdrv->prop_list.ipp_id); | |
1664 | ||
cb471f14 EK |
1665 | ippdrv->drm_dev = NULL; |
1666 | exynos_drm_ippdrv_unregister(ippdrv); | |
1667 | } | |
1668 | } | |
1669 | ||
/*
 * Per-file open hook: remember the ipp device in the file private data
 * so the ioctl handlers can locate the ipp context later.
 */
static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;

	file_priv->ipp_dev = dev;

	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);

	return 0;
}
1681 | ||
1682 | static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, | |
1683 | struct drm_file *file) | |
1684 | { | |
1685 | struct drm_exynos_file_private *file_priv = file->driver_priv; | |
cb471f14 | 1686 | struct exynos_drm_ippdrv *ippdrv = NULL; |
075436b0 | 1687 | struct ipp_context *ctx = get_ipp_context(dev); |
cb471f14 EK |
1688 | struct drm_exynos_ipp_cmd_node *c_node, *tc_node; |
1689 | int count = 0; | |
1690 | ||
5c76c5b1 | 1691 | DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev); |
cb471f14 | 1692 | |
cb471f14 | 1693 | list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { |
7f5af059 | 1694 | mutex_lock(&ippdrv->cmd_lock); |
cb471f14 EK |
1695 | list_for_each_entry_safe(c_node, tc_node, |
1696 | &ippdrv->cmd_list, list) { | |
cbc4c33d YC |
1697 | DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", |
1698 | count++, (int)ippdrv); | |
cb471f14 | 1699 | |
5c76c5b1 | 1700 | if (c_node->dev == file_priv->ipp_dev) { |
cb471f14 EK |
1701 | /* |
1702 | * userland goto unnormal state. process killed. | |
1703 | * and close the file. | |
1704 | * so, IPP didn't called stop cmd ctrl. | |
1705 | * so, we are make stop operation in this state. | |
1706 | */ | |
1707 | if (c_node->state == IPP_STATE_START) { | |
1708 | ipp_stop_property(drm_dev, ippdrv, | |
1709 | c_node); | |
1710 | c_node->state = IPP_STATE_STOP; | |
1711 | } | |
1712 | ||
1713 | ippdrv->dedicated = false; | |
075436b0 | 1714 | ipp_clean_cmd_node(ctx, c_node); |
cb471f14 EK |
1715 | if (list_empty(&ippdrv->cmd_list)) |
1716 | pm_runtime_put_sync(ippdrv->dev); | |
1717 | } | |
1718 | } | |
7f5af059 | 1719 | mutex_unlock(&ippdrv->cmd_lock); |
cb471f14 EK |
1720 | } |
1721 | ||
cb471f14 EK |
1722 | return; |
1723 | } | |
1724 | ||
/*
 * Platform probe: allocate the ipp context, create the event and
 * command workqueues and register with the exynos drm core as a sub
 * driver.
 *
 * Returns 0 on success or a negative errno; workqueues created so far
 * are destroyed on the error paths (ctx itself is devm-managed).
 */
static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * create single thread for ipp event
	 * IPP supports event thread for IPP drivers.
	 * IPP driver send event_work to this thread.
	 * and IPP event thread send event to user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * create single thread for ipp command
	 * IPP supports command thread for user process.
	 * user process make command node using set property ioctl.
	 * and make start_work and send this work to command thread.
	 * and then this command thread start property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver informations */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}
1794 | ||
/*
 * Platform remove: undo ipp_probe() - unregister the sub driver and
 * release idrs, locks and workqueues. ctx itself is devm-managed.
 */
static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}
1815 | ||
cb471f14 EK |
/* platform driver glue for the exynos drm ipp core device */
struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
	},
};
1824 |