/*
 * Samsung S5P Multi Format Codec v 5.1
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Kamil Debski, <k.debski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <media/videobuf2-core.h>
#include "s5p_mfc_common.h"
#include "s5p_mfc_ctrl.h"
#include "s5p_mfc_debug.h"
#include "s5p_mfc_dec.h"
#include "s5p_mfc_enc.h"
#include "s5p_mfc_intr.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_cmd.h"
#include "s5p_mfc_pm.h"

#define S5P_MFC_NAME		"s5p-mfc"
#define S5P_MFC_DEC_NAME	"s5p-mfc-dec"
#define S5P_MFC_ENC_NAME	"s5p-mfc-enc"

int mfc_debug_level;
module_param_named(debug, mfc_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");

/* Helper functions for interrupt processing */

/* Remove from hw execution round robin */
void clear_work_bit(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	spin_lock(&dev->condlock);
	__clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock(&dev->condlock);
}

/* Add to hw execution round robin */
void set_work_bit(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	spin_lock(&dev->condlock);
	__set_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock(&dev->condlock);
}

/* Remove from hw execution round robin */
void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->condlock, flags);
	__clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock_irqrestore(&dev->condlock, flags);
}

/* Add to hw execution round robin */
void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->condlock, flags);
	__set_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock_irqrestore(&dev->condlock, flags);
}

/* Wake up context wait_queue */
static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
			unsigned int err)
{
	ctx->int_cond = 1;
	ctx->int_type = reason;
	ctx->int_err = err;
	wake_up(&ctx->queue);
}

/* Wake up device wait_queue */
static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
			unsigned int err)
{
	dev->int_cond = 1;
	dev->int_type = reason;
	dev->int_err = err;
	wake_up(&dev->queue);
}

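/*
 * Watchdog timer callback: count the ticks for which the hardware stays
 * locked without raising an interrupt and, once the limit is reached,
 * schedule the recovery worker below.
 */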
static void s5p_mfc_watchdog(unsigned long arg)
{
	struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;

	if (test_bit(0, &dev->hw_lock))
		atomic_inc(&dev->watchdog_cnt);
	if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
		/* This means that hw is busy and no interrupts were
		 * generated by hw for the Nth time of running this
		 * watchdog timer. This usually means a serious hw
		 * error. Now it is time to kill all instances and
		 * reset the MFC. */
		mfc_err("Time out during waiting for HW\n");
		queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
	}
	dev->watchdog_timer.expires = jiffies +
					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
	add_timer(&dev->watchdog_timer);
}

static void s5p_mfc_watchdog_worker(struct work_struct *work)
{
	struct s5p_mfc_dev *dev;
	struct s5p_mfc_ctx *ctx;
	unsigned long flags;
	int mutex_locked;
	int i, ret;

	dev = container_of(work, struct s5p_mfc_dev, watchdog_work);

	mfc_err("Driver timeout error handling\n");
	/* Lock the mutex that protects open and release.
	 * This is necessary as they may load and unload firmware. */
	mutex_locked = mutex_trylock(&dev->mfc_mutex);
	if (!mutex_locked)
		mfc_err("Error: some instance may be closing/opening\n");
	spin_lock_irqsave(&dev->irqlock, flags);

	s5p_mfc_clock_off();

	for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
		ctx = dev->ctx[i];
		if (!ctx)
			continue;
		ctx->state = MFCINST_ERROR;
		s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
						&ctx->dst_queue, &ctx->vq_dst);
		s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
						&ctx->src_queue, &ctx->vq_src);
		clear_work_bit(ctx);
		wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0);
	}
	clear_bit(0, &dev->hw_lock);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* Double check if there is at least one instance running.
	 * If no instance is in memory then no firmware should be present */
	if (dev->num_inst > 0) {
		ret = s5p_mfc_load_firmware(dev);
		if (ret) {
			mfc_err("Failed to reload FW\n");
			goto unlock;
		}
		s5p_mfc_clock_on();
		ret = s5p_mfc_init_hw(dev);
		if (ret)
			mfc_err("Failed to reinit FW\n");
	}
unlock:
	if (mutex_locked)
		mutex_unlock(&dev->mfc_mutex);
}

static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
	mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
}

static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_buf *dst_buf;
	struct s5p_mfc_dev *dev = ctx->dev;

	ctx->state = MFCINST_FINISHED;
	ctx->sequence++;
	while (!list_empty(&ctx->dst_queue)) {
		dst_buf = list_entry(ctx->dst_queue.next,
				     struct s5p_mfc_buf, list);
		mfc_debug(2, "Cleaning up buffer: %d\n",
					  dst_buf->b->v4l2_buf.index);
		vb2_set_plane_payload(dst_buf->b, 0, 0);
		vb2_set_plane_payload(dst_buf->b, 1, 0);
		list_del(&dst_buf->list);
		ctx->dst_queue_cnt--;
		dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);

		if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
			s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
		else
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;

		ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
		vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
	}
}

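/*
 * Copy the timestamp, timecode and frame-type flags from the source
 * (bitstream) buffer to the destination buffer that matches the decoded
 * luma address reported by the hardware.
 */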
static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *dst_buf, *src_buf;
	size_t dec_y_addr;
	unsigned int frame_type;

	dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
	frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);

	/* Copy timestamp / timecode from decoded src to dst and set
	   appropriate flags */
	src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
		if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
			dst_buf->b->v4l2_buf.timecode =
						src_buf->b->v4l2_buf.timecode;
			dst_buf->b->v4l2_buf.timestamp =
						src_buf->b->v4l2_buf.timestamp;
			dst_buf->b->v4l2_buf.flags &=
				~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
			dst_buf->b->v4l2_buf.flags |=
				src_buf->b->v4l2_buf.flags
				& V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
			switch (frame_type) {
			case S5P_FIMV_DECODE_FRAME_I_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_KEYFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_P_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_PFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_B_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_BFRAME;
				break;
			}
			break;
		}
	}
}

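/*
 * A new frame is ready for display: find the destination buffer whose
 * DMA address matches the display address reported by the hardware,
 * fill in its payload and field order, and hand it back to vb2.
 */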
static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *dst_buf;
	size_t dspl_y_addr;
	unsigned int frame_type;

	dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
	if (IS_MFCV6_PLUS(dev))
		frame_type = s5p_mfc_hw_call(dev->mfc_ops,
				get_disp_frame_type, ctx);
	else
		frame_type = s5p_mfc_hw_call(dev->mfc_ops,
				get_dec_frame_type, dev);

	/* If frame is same as previous then skip and do not dequeue */
	if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
		if (!ctx->after_packed_pb)
			ctx->sequence++;
		ctx->after_packed_pb = 0;
		return;
	}
	ctx->sequence++;
	/* The MFC returns address of the buffer, now we have to
	 * check which videobuf does it correspond to */
	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
		/* Check if this is the buffer we're looking for */
		if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) {
			list_del(&dst_buf->list);
			ctx->dst_queue_cnt--;
			dst_buf->b->v4l2_buf.sequence = ctx->sequence;
			if (s5p_mfc_hw_call(dev->mfc_ops,
					get_pic_type_top, ctx) ==
				s5p_mfc_hw_call(dev->mfc_ops,
					get_pic_type_bot, ctx))
				dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
			else
				dst_buf->b->v4l2_buf.field =
							V4L2_FIELD_INTERLACED;
			vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
			vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
			clear_bit(dst_buf->b->v4l2_buf.index,
							&ctx->dec_dst_flag);

			vb2_buffer_done(dst_buf->b,
				err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);

			break;
		}
	}
}

/* Handle frame decoding interrupt */
static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
					unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned int dst_frame_status;
	unsigned int dec_frame_status;
	struct s5p_mfc_buf *src_buf;
	unsigned long flags;
	unsigned int res_change;

	dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
				& S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
	dec_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dec_status, dev)
				& S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
	res_change = (s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
				& S5P_FIMV_DEC_STATUS_RESOLUTION_MASK)
				>> S5P_FIMV_DEC_STATUS_RESOLUTION_SHIFT;
	mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
	if (ctx->state == MFCINST_RES_CHANGE_INIT)
		ctx->state = MFCINST_RES_CHANGE_FLUSH;
	if (res_change == S5P_FIMV_RES_INCREASE ||
		res_change == S5P_FIMV_RES_DECREASE) {
		ctx->state = MFCINST_RES_CHANGE_INIT;
		s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
		wake_up_ctx(ctx, reason, err);
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		s5p_mfc_clock_off();
		s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
		return;
	}
	if (ctx->dpb_flush_flag)
		ctx->dpb_flush_flag = 0;

	spin_lock_irqsave(&dev->irqlock, flags);
	/* All frames remaining in the buffer have been extracted */
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
		if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
			static const struct v4l2_event ev_src_ch = {
				.type = V4L2_EVENT_SOURCE_CHANGE,
				.u.src_change.changes =
					V4L2_EVENT_SRC_CH_RESOLUTION,
			};

			s5p_mfc_handle_frame_all_extracted(ctx);
			ctx->state = MFCINST_RES_CHANGE_END;
			v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);

			goto leave_handle_frame;
		} else {
			s5p_mfc_handle_frame_all_extracted(ctx);
		}
	}

	if (dec_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY)
		s5p_mfc_handle_frame_copy_time(ctx);

	/* A frame has been decoded and is in the buffer */
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
	    dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
		s5p_mfc_handle_frame_new(ctx, err);
	} else {
		mfc_debug(2, "No frame decode\n");
	}
	/* Mark source buffer as complete */
	if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
		&& !list_empty(&ctx->src_queue)) {
		src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
								list);
		ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops,
						get_consumed_stream, dev);
		if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
			ctx->codec_mode != S5P_MFC_CODEC_VP8_DEC &&
			ctx->consumed_stream + STUFF_BYTE <
			src_buf->b->v4l2_planes[0].bytesused) {
			/* Run MFC again on the same buffer */
			mfc_debug(2, "Running again the same buffer\n");
			ctx->after_packed_pb = 1;
		} else {
			mfc_debug(2, "MFC needs next buffer\n");
			ctx->consumed_stream = 0;
			if (src_buf->flags & MFC_BUF_FLAG_EOS)
				ctx->state = MFCINST_FINISHING;
			list_del(&src_buf->list);
			ctx->src_queue_cnt--;
			if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0)
				vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
			else
				vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
		}
	}
leave_handle_frame:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
				    || ctx->dst_queue_cnt < ctx->pb_count)
		clear_work_bit(ctx);
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	wake_up_ctx(ctx, reason, err);
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_clock_off();
	/* if suspending, wake up device and do not try_run again */
	if (test_bit(0, &dev->enter_suspend))
		wake_up_dev(dev, reason, err);
	else
		s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
}

/* Error handling for interrupt */
static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
		struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
{
	unsigned long flags;

	mfc_err("Interrupt Error: %08x\n", err);

	if (ctx != NULL) {
		/* Error recovery is dependent on the state of context */
		switch (ctx->state) {
		case MFCINST_RES_CHANGE_INIT:
		case MFCINST_RES_CHANGE_FLUSH:
		case MFCINST_RES_CHANGE_END:
		case MFCINST_FINISHING:
		case MFCINST_FINISHED:
		case MFCINST_RUNNING:
			/* It is highly probable that an error occurred
			 * while decoding a frame */
			clear_work_bit(ctx);
			ctx->state = MFCINST_ERROR;
			/* Mark all dst buffers as having an error */
			spin_lock_irqsave(&dev->irqlock, flags);
			s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
						&ctx->dst_queue, &ctx->vq_dst);
			/* Mark all src buffers as having an error */
			s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
						&ctx->src_queue, &ctx->vq_src);
			spin_unlock_irqrestore(&dev->irqlock, flags);
			wake_up_ctx(ctx, reason, err);
			break;
		default:
			clear_work_bit(ctx);
			ctx->state = MFCINST_ERROR;
			wake_up_ctx(ctx, reason, err);
			break;
		}
	}
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	s5p_mfc_clock_off();
	wake_up_dev(dev, reason, err);
	return;
}

/* Header parsing interrupt handling */
static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev;

	if (ctx == NULL)
		return;
	dev = ctx->dev;
	if (ctx->c_ops->post_seq_start) {
		if (ctx->c_ops->post_seq_start(ctx))
			mfc_err("post_seq_start() failed\n");
	} else {
		ctx->img_width = s5p_mfc_hw_call(dev->mfc_ops, get_img_width,
				dev);
		ctx->img_height = s5p_mfc_hw_call(dev->mfc_ops, get_img_height,
				dev);

		s5p_mfc_hw_call_void(dev->mfc_ops, dec_calc_dpb_size, ctx);

		ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
				dev);
		ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
				dev);
		if (ctx->img_width == 0 || ctx->img_height == 0)
			ctx->state = MFCINST_ERROR;
		else
			ctx->state = MFCINST_HEAD_PARSED;

		if ((ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
			ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) &&
				!list_empty(&ctx->src_queue)) {
			struct s5p_mfc_buf *src_buf;
			src_buf = list_entry(ctx->src_queue.next,
					struct s5p_mfc_buf, list);
			if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream,
						dev) <
					src_buf->b->v4l2_planes[0].bytesused)
				ctx->head_processed = 0;
			else
				ctx->head_processed = 1;
		} else {
			ctx->head_processed = 1;
		}
	}
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	clear_work_bit(ctx);
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_clock_off();
	s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
	wake_up_ctx(ctx, reason, err);
}

/* Buffer initialization interrupt handling */
static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_buf *src_buf;
	struct s5p_mfc_dev *dev;
	unsigned long flags;

	if (ctx == NULL)
		return;
	dev = ctx->dev;
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->int_cond = 1;
	clear_work_bit(ctx);
	if (err == 0) {
		ctx->state = MFCINST_RUNNING;
		if (!ctx->dpb_flush_flag && ctx->head_processed) {
			spin_lock_irqsave(&dev->irqlock, flags);
			if (!list_empty(&ctx->src_queue)) {
				src_buf = list_entry(ctx->src_queue.next,
					     struct s5p_mfc_buf, list);
				list_del(&src_buf->list);
				ctx->src_queue_cnt--;
				vb2_buffer_done(src_buf->b,
						VB2_BUF_STATE_DONE);
			}
			spin_unlock_irqrestore(&dev->irqlock, flags);
		} else {
			ctx->dpb_flush_flag = 0;
		}
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();

		s5p_mfc_clock_off();

		wake_up(&ctx->queue);
		s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
	} else {
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();

		s5p_mfc_clock_off();

		wake_up(&ctx->queue);
	}
}

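/*
 * Stream-complete interrupt: the hardware reports that the stream has
 * been completed, so return one destination buffer with an empty payload
 * and move the context to the FINISHED state.
 */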
static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *mb_entry;

	mfc_debug(2, "Stream completed\n");

	s5p_mfc_clear_int_flags(dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->state = MFCINST_FINISHED;

	spin_lock(&dev->irqlock);
	if (!list_empty(&ctx->dst_queue)) {
		mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
									list);
		list_del(&mb_entry->list);
		ctx->dst_queue_cnt--;
		vb2_set_plane_payload(mb_entry->b, 0, 0);
		vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
	}
	spin_unlock(&dev->irqlock);

	clear_work_bit(ctx);

	WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);

	s5p_mfc_clock_off();
	wake_up(&ctx->queue);
	s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
}

/* Interrupt processing */
static irqreturn_t s5p_mfc_irq(int irq, void *priv)
{
	struct s5p_mfc_dev *dev = priv;
	struct s5p_mfc_ctx *ctx;
	unsigned int reason;
	unsigned int err;

	mfc_debug_enter();
	/* Reset the timeout watchdog */
	atomic_set(&dev->watchdog_cnt, 0);
	ctx = dev->ctx[dev->curr_ctx];
	/* Get the reason of interrupt and the error code */
	reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev);
	err = s5p_mfc_hw_call(dev->mfc_ops, get_int_err, dev);
	mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
	switch (reason) {
	case S5P_MFC_R2H_CMD_ERR_RET:
		/* An error has occurred */
		if (ctx->state == MFCINST_RUNNING &&
			s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
				dev->warn_start)
			s5p_mfc_handle_frame(ctx, reason, err);
		else
			s5p_mfc_handle_error(dev, ctx, reason, err);
		clear_bit(0, &dev->enter_suspend);
		break;

	case S5P_MFC_R2H_CMD_SLICE_DONE_RET:
	case S5P_MFC_R2H_CMD_FIELD_DONE_RET:
	case S5P_MFC_R2H_CMD_FRAME_DONE_RET:
		if (ctx->c_ops->post_frame_start) {
			if (ctx->c_ops->post_frame_start(ctx))
				mfc_err("post_frame_start() failed\n");
			s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
			wake_up_ctx(ctx, reason, err);
			if (test_and_clear_bit(0, &dev->hw_lock) == 0)
				BUG();
			s5p_mfc_clock_off();
			s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
		} else {
			s5p_mfc_handle_frame(ctx, reason, err);
		}
		break;

	case S5P_MFC_R2H_CMD_SEQ_DONE_RET:
		s5p_mfc_handle_seq_done(ctx, reason, err);
		break;

	case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
		ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
		ctx->state = MFCINST_GOT_INST;
		clear_work_bit(ctx);
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;

	case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
		clear_work_bit(ctx);
		ctx->inst_no = MFC_NO_INSTANCE_SET;
		ctx->state = MFCINST_FREE;
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;

	case S5P_MFC_R2H_CMD_SYS_INIT_RET:
	case S5P_MFC_R2H_CMD_FW_STATUS_RET:
	case S5P_MFC_R2H_CMD_SLEEP_RET:
	case S5P_MFC_R2H_CMD_WAKEUP_RET:
		if (ctx)
			clear_work_bit(ctx);
		s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
		wake_up_dev(dev, reason, err);
		clear_bit(0, &dev->hw_lock);
		clear_bit(0, &dev->enter_suspend);
		break;

	case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
		s5p_mfc_handle_init_buffers(ctx, reason, err);
		break;

	case S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET:
		s5p_mfc_handle_stream_complete(ctx, reason, err);
		break;

	case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
		clear_work_bit(ctx);
		ctx->state = MFCINST_RUNNING;
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;

	default:
		mfc_debug(2, "Unknown int reason\n");
		s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	}
	mfc_debug_leave();
	return IRQ_HANDLED;
irq_cleanup_hw:
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->int_cond = 1;
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		mfc_err("Failed to unlock hw\n");

	s5p_mfc_clock_off();

	s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
	mfc_debug(2, "Exit via irq_cleanup_hw\n");
	return IRQ_HANDLED;
}

/* Open an MFC node */
static int s5p_mfc_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct s5p_mfc_dev *dev = video_drvdata(file);
	struct s5p_mfc_ctx *ctx = NULL;
	struct vb2_queue *q;
	int ret = 0;

	mfc_debug_enter();
	if (mutex_lock_interruptible(&dev->mfc_mutex))
		return -ERESTARTSYS;
	dev->num_inst++;	/* It is guarded by mfc_mutex in vfd */
	/* Allocate memory for context */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		mfc_err("Not enough memory\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	v4l2_fh_init(&ctx->fh, vdev);
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);
	ctx->dev = dev;
	INIT_LIST_HEAD(&ctx->src_queue);
	INIT_LIST_HEAD(&ctx->dst_queue);
	ctx->src_queue_cnt = 0;
	ctx->dst_queue_cnt = 0;
	/* Get context number */
	ctx->num = 0;
	while (dev->ctx[ctx->num]) {
		ctx->num++;
		if (ctx->num >= MFC_NUM_CONTEXTS) {
			mfc_err("Too many open contexts\n");
			ret = -EBUSY;
			goto err_no_ctx;
		}
	}
	/* Mark context as idle */
	clear_work_bit_irqsave(ctx);
	dev->ctx[ctx->num] = ctx;
	if (vdev == dev->vfd_dec) {
		ctx->type = MFCINST_DECODER;
		ctx->c_ops = get_dec_codec_ops();
		s5p_mfc_dec_init(ctx);
		/* Setup ctrl handler */
		ret = s5p_mfc_dec_ctrls_setup(ctx);
		if (ret) {
			mfc_err("Failed to setup mfc controls\n");
			goto err_ctrls_setup;
		}
	} else if (vdev == dev->vfd_enc) {
		ctx->type = MFCINST_ENCODER;
		ctx->c_ops = get_enc_codec_ops();
		/* only for encoder */
		INIT_LIST_HEAD(&ctx->ref_queue);
		ctx->ref_queue_cnt = 0;
		s5p_mfc_enc_init(ctx);
		/* Setup ctrl handler */
		ret = s5p_mfc_enc_ctrls_setup(ctx);
		if (ret) {
			mfc_err("Failed to setup mfc controls\n");
			goto err_ctrls_setup;
		}
	} else {
		ret = -ENOENT;
		goto err_bad_node;
	}
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	ctx->inst_no = MFC_NO_INSTANCE_SET;
	/* Load firmware if this is the first instance */
	if (dev->num_inst == 1) {
		dev->watchdog_timer.expires = jiffies +
					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
		add_timer(&dev->watchdog_timer);
		ret = s5p_mfc_power_on();
		if (ret < 0) {
			mfc_err("power on failed\n");
			goto err_pwr_enable;
		}
		s5p_mfc_clock_on();
		ret = s5p_mfc_load_firmware(dev);
		if (ret) {
			s5p_mfc_clock_off();
			goto err_load_fw;
		}
		/* Init the FW */
		ret = s5p_mfc_init_hw(dev);
		s5p_mfc_clock_off();
		if (ret)
			goto err_init_hw;
	}
	/* Init videobuf2 queue for CAPTURE */
	q = &ctx->vq_dst;
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	q->drv_priv = &ctx->fh;
	if (vdev == dev->vfd_dec) {
		q->io_modes = VB2_MMAP;
		q->ops = get_dec_queue_ops();
	} else if (vdev == dev->vfd_enc) {
		q->io_modes = VB2_MMAP | VB2_USERPTR;
		q->ops = get_enc_queue_ops();
	} else {
		ret = -ENOENT;
		goto err_queue_init;
	}
	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	ret = vb2_queue_init(q);
	if (ret) {
		mfc_err("Failed to initialize videobuf2 queue(capture)\n");
		goto err_queue_init;
	}
	/* Init videobuf2 queue for OUTPUT */
	q = &ctx->vq_src;
	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	q->io_modes = VB2_MMAP;
	q->drv_priv = &ctx->fh;
	if (vdev == dev->vfd_dec) {
		q->io_modes = VB2_MMAP;
		q->ops = get_dec_queue_ops();
	} else if (vdev == dev->vfd_enc) {
		q->io_modes = VB2_MMAP | VB2_USERPTR;
		q->ops = get_enc_queue_ops();
	} else {
		ret = -ENOENT;
		goto err_queue_init;
	}
	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	ret = vb2_queue_init(q);
	if (ret) {
		mfc_err("Failed to initialize videobuf2 queue(output)\n");
		goto err_queue_init;
	}
	init_waitqueue_head(&ctx->queue);
	mutex_unlock(&dev->mfc_mutex);
	mfc_debug_leave();
	return ret;
	/* Deinit when failure occurred */
err_queue_init:
	if (dev->num_inst == 1)
		s5p_mfc_deinit_hw(dev);
err_init_hw:
err_load_fw:
err_pwr_enable:
	if (dev->num_inst == 1) {
		if (s5p_mfc_power_off() < 0)
			mfc_err("power off failed\n");
		del_timer_sync(&dev->watchdog_timer);
	}
err_ctrls_setup:
	s5p_mfc_dec_ctrls_delete(ctx);
err_bad_node:
	dev->ctx[ctx->num] = NULL;
err_no_ctx:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
err_alloc:
	dev->num_inst--;
	mutex_unlock(&dev->mfc_mutex);
	mfc_debug_leave();
	return ret;
}

/* Release MFC context */
static int s5p_mfc_release(struct file *file)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_debug_enter();
	mutex_lock(&dev->mfc_mutex);
	s5p_mfc_clock_on();
	vb2_queue_release(&ctx->vq_src);
	vb2_queue_release(&ctx->vq_dst);
	/* Mark context as idle */
	clear_work_bit_irqsave(ctx);
	/* If instance was initialised and not yet freed,
	 * return instance and free resources */
	if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) {
		mfc_debug(2, "Has to free instance\n");
		s5p_mfc_close_mfc_inst(dev, ctx);
	}
	/* hardware locking scheme */
	if (dev->curr_ctx == ctx->num)
		clear_bit(0, &dev->hw_lock);
	dev->num_inst--;
	if (dev->num_inst == 0) {
		mfc_debug(2, "Last instance\n");
		s5p_mfc_deinit_hw(dev);
		del_timer_sync(&dev->watchdog_timer);
		if (s5p_mfc_power_off() < 0)
			mfc_err("Power off failed\n");
	}
	mfc_debug(2, "Shutting down clock\n");
	s5p_mfc_clock_off();
	dev->ctx[ctx->num] = NULL;
	s5p_mfc_dec_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	mfc_debug_leave();
	mutex_unlock(&dev->mfc_mutex);
	return 0;
}

/* Poll */
static unsigned int s5p_mfc_poll(struct file *file,
				 struct poll_table_struct *wait)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	mutex_lock(&dev->mfc_mutex);
	src_q = &ctx->vq_src;
	dst_q = &ctx->vq_dst;
	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc = POLLERR;
		goto end;
	}
	mutex_unlock(&dev->mfc_mutex);
	poll_wait(file, &ctx->fh.wait, wait);
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);
	mutex_lock(&dev->mfc_mutex);
	if (v4l2_event_pending(&ctx->fh))
		rc |= POLLPRI;
	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
								done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
				|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);
	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
								done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
				|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);
end:
	mutex_unlock(&dev->mfc_mutex);
	return rc;
}

/* Mmap */
static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int ret;

	if (mutex_lock_interruptible(&dev->mfc_mutex))
		return -ERESTARTSYS;
	if (offset < DST_QUEUE_OFF_BASE) {
		mfc_debug(2, "mmapping source\n");
		ret = vb2_mmap(&ctx->vq_src, vma);
	} else {		/* capture */
		mfc_debug(2, "mmapping destination\n");
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
		ret = vb2_mmap(&ctx->vq_dst, vma);
	}
	mutex_unlock(&dev->mfc_mutex);
	return ret;
}

/* v4l2 ops */
static const struct v4l2_file_operations s5p_mfc_fops = {
	.owner = THIS_MODULE,
	.open = s5p_mfc_open,
	.release = s5p_mfc_release,
	.poll = s5p_mfc_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = s5p_mfc_mmap,
};

static int match_child(struct device *dev, void *data)
{
	if (!dev_name(dev))
		return 0;
	return !strcmp(dev_name(dev), (char *)data);
}

static void *mfc_get_drv_data(struct platform_device *pdev);

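/*
 * On DT platforms the two MFC memory ports are represented by child
 * devices created here; each one gets a coherent DMA pool described by
 * the "samsung,mfc-l" / "samsung,mfc-r" properties.
 */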
static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
{
	unsigned int mem_info[2] = { };

	dev->mem_dev_l = devm_kzalloc(&dev->plat_dev->dev,
			sizeof(struct device), GFP_KERNEL);
	if (!dev->mem_dev_l) {
		mfc_err("Not enough memory\n");
		return -ENOMEM;
	}
	device_initialize(dev->mem_dev_l);
	of_property_read_u32_array(dev->plat_dev->dev.of_node,
			"samsung,mfc-l", mem_info, 2);
	if (dma_declare_coherent_memory(dev->mem_dev_l, mem_info[0],
				mem_info[0], mem_info[1],
				DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
		mfc_err("Failed to declare coherent memory for\n"
		"MFC device\n");
		return -ENOMEM;
	}

	dev->mem_dev_r = devm_kzalloc(&dev->plat_dev->dev,
			sizeof(struct device), GFP_KERNEL);
	if (!dev->mem_dev_r) {
		mfc_err("Not enough memory\n");
		return -ENOMEM;
	}
	device_initialize(dev->mem_dev_r);
	of_property_read_u32_array(dev->plat_dev->dev.of_node,
			"samsung,mfc-r", mem_info, 2);
	if (dma_declare_coherent_memory(dev->mem_dev_r, mem_info[0],
				mem_info[0], mem_info[1],
				DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
		pr_err("Failed to declare coherent memory for\n"
		"MFC device\n");
		return -ENOMEM;
	}
	return 0;
}

/* MFC probe function */
static int s5p_mfc_probe(struct platform_device *pdev)
{
	struct s5p_mfc_dev *dev;
	struct video_device *vfd;
	struct resource *res;
	int ret;

	pr_debug("%s++\n", __func__);
	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "Not enough memory for MFC device\n");
		return -ENOMEM;
	}

	spin_lock_init(&dev->irqlock);
	spin_lock_init(&dev->condlock);
	dev->plat_dev = pdev;
	if (!dev->plat_dev) {
		dev_err(&pdev->dev, "No platform data specified\n");
		return -ENODEV;
	}

	dev->variant = mfc_get_drv_data(pdev);

	ret = s5p_mfc_init_pm(dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get mfc clock source\n");
		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		ret = -ENOENT;
		goto err_res;
	}
	dev->irq = res->start;
	ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
					0, pdev->name, dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
		goto err_res;
	}

	if (pdev->dev.of_node) {
		ret = s5p_mfc_alloc_memdevs(dev);
		if (ret < 0)
			goto err_res;
	} else {
		dev->mem_dev_l = device_find_child(&dev->plat_dev->dev,
				"s5p-mfc-l", match_child);
		if (!dev->mem_dev_l) {
			mfc_err("Mem child (L) device get failed\n");
			ret = -ENODEV;
			goto err_res;
		}
		dev->mem_dev_r = device_find_child(&dev->plat_dev->dev,
				"s5p-mfc-r", match_child);
		if (!dev->mem_dev_r) {
			mfc_err("Mem child (R) device get failed\n");
			ret = -ENODEV;
			goto err_res;
		}
	}

	dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
	if (IS_ERR(dev->alloc_ctx[0])) {
		ret = PTR_ERR(dev->alloc_ctx[0]);
		goto err_res;
	}
	dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
	if (IS_ERR(dev->alloc_ctx[1])) {
		ret = PTR_ERR(dev->alloc_ctx[1]);
		goto err_mem_init_ctx_1;
	}

	mutex_init(&dev->mfc_mutex);

	ret = s5p_mfc_alloc_firmware(dev);
	if (ret)
		goto err_alloc_fw;

	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret)
		goto err_v4l2_dev_reg;
	init_waitqueue_head(&dev->queue);

	/* decoder */
	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
		ret = -ENOMEM;
		goto err_dec_alloc;
	}
	vfd->fops	= &s5p_mfc_fops;
	vfd->ioctl_ops	= get_dec_v4l2_ioctl_ops();
	vfd->release	= video_device_release;
	vfd->lock	= &dev->mfc_mutex;
	vfd->v4l2_dev	= &dev->v4l2_dev;
	vfd->vfl_dir	= VFL_DIR_M2M;
	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
	dev->vfd_dec	= vfd;
	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
		video_device_release(vfd);
		goto err_dec_reg;
	}
	v4l2_info(&dev->v4l2_dev,
		  "decoder registered as /dev/video%d\n", vfd->num);
	video_set_drvdata(vfd, dev);

	/* encoder */
	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
		ret = -ENOMEM;
		goto err_enc_alloc;
	}
	vfd->fops	= &s5p_mfc_fops;
	vfd->ioctl_ops	= get_enc_v4l2_ioctl_ops();
	vfd->release	= video_device_release;
	vfd->lock	= &dev->mfc_mutex;
	vfd->v4l2_dev	= &dev->v4l2_dev;
	vfd->vfl_dir	= VFL_DIR_M2M;
	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
	dev->vfd_enc	= vfd;
	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
		video_device_release(vfd);
		goto err_enc_reg;
	}
	v4l2_info(&dev->v4l2_dev,
		  "encoder registered as /dev/video%d\n", vfd->num);
	video_set_drvdata(vfd, dev);
	platform_set_drvdata(pdev, dev);

	dev->hw_lock = 0;
	dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
	INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
	atomic_set(&dev->watchdog_cnt, 0);
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = s5p_mfc_watchdog;

	/* Initialize HW ops and commands based on MFC version */
	s5p_mfc_init_hw_ops(dev);
	s5p_mfc_init_hw_cmds(dev);
	s5p_mfc_init_regs(dev);

	pr_debug("%s--\n", __func__);
	return 0;

/* Deinit MFC if probe had failed */
err_enc_reg:
	video_device_release(dev->vfd_enc);
err_enc_alloc:
	video_unregister_device(dev->vfd_dec);
err_dec_reg:
	video_device_release(dev->vfd_dec);
err_dec_alloc:
	v4l2_device_unregister(&dev->v4l2_dev);
err_v4l2_dev_reg:
	s5p_mfc_release_firmware(dev);
err_alloc_fw:
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
err_mem_init_ctx_1:
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
err_res:
	s5p_mfc_final_pm(dev);

	pr_debug("%s-- with error\n", __func__);
	return ret;

}

/* Remove the driver */
static int s5p_mfc_remove(struct platform_device *pdev)
{
	struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);

	del_timer_sync(&dev->watchdog_timer);
	flush_workqueue(dev->watchdog_workqueue);
	destroy_workqueue(dev->watchdog_workqueue);

	video_unregister_device(dev->vfd_enc);
	video_unregister_device(dev->vfd_dec);
	v4l2_device_unregister(&dev->v4l2_dev);
	s5p_mfc_release_firmware(dev);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
	if (pdev->dev.of_node) {
		put_device(dev->mem_dev_l);
		put_device(dev->mem_dev_r);
	}

	s5p_mfc_final_pm(dev);
	return 0;
}
1263#ifdef CONFIG_PM_SLEEP
1264
1265static int s5p_mfc_suspend(struct device *dev)
1266{
1267 struct platform_device *pdev = to_platform_device(dev);
1268 struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1269 int ret;
1270
1271 if (m_dev->num_inst == 0)
1272 return 0;
81c9bcfb 1273
af935746
KD
1274 if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
1275 mfc_err("Error: going to suspend for a second time\n");
1276 return -EIO;
1277 }
1278
	/* Check if we're processing; wait if necessary. */
	while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
		/* Try and lock the HW */
		/* Wait on the interrupt waitqueue */
		ret = wait_event_interruptible_timeout(m_dev->queue,
			m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT));
		if (ret == 0) {
			mfc_err("Waiting for hardware to finish timed out\n");
			return -EIO;
		}
	}

	return s5p_mfc_sleep(m_dev);
}

static int s5p_mfc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);

	if (m_dev->num_inst == 0)
		return 0;
	return s5p_mfc_wakeup(m_dev);
}
#endif

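/*
 * The runtime PM callbacks only track the power state in pm.power;
 * clock gating itself is done through the s5p_mfc_clock_on/off()
 * helpers used throughout this file.
 */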
#ifdef CONFIG_PM_RUNTIME
static int s5p_mfc_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);

	atomic_set(&m_dev->pm.power, 0);
	return 0;
}

static int s5p_mfc_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);

	if (!m_dev->alloc_ctx)
		return 0;
	atomic_set(&m_dev->pm.power, 1);
	return 0;
}
#endif

/* Power management */
static const struct dev_pm_ops s5p_mfc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
	SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
			   NULL)
};

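/* Per-version buffer sizes, alignment requirements and firmware names */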
static struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
	.h264_ctx	= MFC_H264_CTX_BUF_SIZE,
	.non_h264_ctx	= MFC_CTX_BUF_SIZE,
	.dsc		= DESC_BUF_SIZE,
	.shm		= SHARED_BUF_SIZE,
};

static struct s5p_mfc_buf_size buf_size_v5 = {
	.fw	= MAX_FW_SIZE,
	.cpb	= MAX_CPB_SIZE,
	.priv	= &mfc_buf_size_v5,
};

static struct s5p_mfc_buf_align mfc_buf_align_v5 = {
	.base = MFC_BASE_ALIGN_ORDER,
};

static struct s5p_mfc_variant mfc_drvdata_v5 = {
	.version	= MFC_VERSION,
	.version_bit	= MFC_V5_BIT,
	.port_num	= MFC_NUM_PORTS,
	.buf_size	= &buf_size_v5,
	.buf_align	= &mfc_buf_align_v5,
	.fw_name[0]	= "s5p-mfc.fw",
};

static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
	.dev_ctx	= MFC_CTX_BUF_SIZE_V6,
	.h264_dec_ctx	= MFC_H264_DEC_CTX_BUF_SIZE_V6,
	.other_dec_ctx	= MFC_OTHER_DEC_CTX_BUF_SIZE_V6,
	.h264_enc_ctx	= MFC_H264_ENC_CTX_BUF_SIZE_V6,
	.other_enc_ctx	= MFC_OTHER_ENC_CTX_BUF_SIZE_V6,
};

static struct s5p_mfc_buf_size buf_size_v6 = {
	.fw	= MAX_FW_SIZE_V6,
	.cpb	= MAX_CPB_SIZE_V6,
	.priv	= &mfc_buf_size_v6,
};

static struct s5p_mfc_buf_align mfc_buf_align_v6 = {
	.base = 0,
};

static struct s5p_mfc_variant mfc_drvdata_v6 = {
	.version	= MFC_VERSION_V6,
	.version_bit	= MFC_V6_BIT,
	.port_num	= MFC_NUM_PORTS_V6,
	.buf_size	= &buf_size_v6,
	.buf_align	= &mfc_buf_align_v6,
	.fw_name[0]	= "s5p-mfc-v6.fw",
	/*
	 * v6-v2 firmware contains bug fixes and interface change
	 * for init buffer command
	 */
	.fw_name[1]	= "s5p-mfc-v6-v2.fw",
};

static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
	.dev_ctx	= MFC_CTX_BUF_SIZE_V7,
	.h264_dec_ctx	= MFC_H264_DEC_CTX_BUF_SIZE_V7,
	.other_dec_ctx	= MFC_OTHER_DEC_CTX_BUF_SIZE_V7,
	.h264_enc_ctx	= MFC_H264_ENC_CTX_BUF_SIZE_V7,
	.other_enc_ctx	= MFC_OTHER_ENC_CTX_BUF_SIZE_V7,
};

static struct s5p_mfc_buf_size buf_size_v7 = {
	.fw	= MAX_FW_SIZE_V7,
	.cpb	= MAX_CPB_SIZE_V7,
	.priv	= &mfc_buf_size_v7,
};

static struct s5p_mfc_buf_align mfc_buf_align_v7 = {
	.base = 0,
};

static struct s5p_mfc_variant mfc_drvdata_v7 = {
	.version	= MFC_VERSION_V7,
	.version_bit	= MFC_V7_BIT,
	.port_num	= MFC_NUM_PORTS_V7,
	.buf_size	= &buf_size_v7,
	.buf_align	= &mfc_buf_align_v7,
	.fw_name[0]	= "s5p-mfc-v7.fw",
};

static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
	.dev_ctx	= MFC_CTX_BUF_SIZE_V8,
	.h264_dec_ctx	= MFC_H264_DEC_CTX_BUF_SIZE_V8,
	.other_dec_ctx	= MFC_OTHER_DEC_CTX_BUF_SIZE_V8,
	.h264_enc_ctx	= MFC_H264_ENC_CTX_BUF_SIZE_V8,
	.other_enc_ctx	= MFC_OTHER_ENC_CTX_BUF_SIZE_V8,
};

static struct s5p_mfc_buf_size buf_size_v8 = {
	.fw	= MAX_FW_SIZE_V8,
	.cpb	= MAX_CPB_SIZE_V8,
	.priv	= &mfc_buf_size_v8,
};

static struct s5p_mfc_buf_align mfc_buf_align_v8 = {
	.base = 0,
};

static struct s5p_mfc_variant mfc_drvdata_v8 = {
	.version	= MFC_VERSION_V8,
	.version_bit	= MFC_V8_BIT,
	.port_num	= MFC_NUM_PORTS_V8,
	.buf_size	= &buf_size_v8,
	.buf_align	= &mfc_buf_align_v8,
	.fw_name[0]	= "s5p-mfc-v8.fw",
};

static struct platform_device_id mfc_driver_ids[] = {
	{
		.name = "s5p-mfc",
		.driver_data = (unsigned long)&mfc_drvdata_v5,
	}, {
		.name = "s5p-mfc-v5",
		.driver_data = (unsigned long)&mfc_drvdata_v5,
	}, {
		.name = "s5p-mfc-v6",
		.driver_data = (unsigned long)&mfc_drvdata_v6,
	}, {
		.name = "s5p-mfc-v7",
		.driver_data = (unsigned long)&mfc_drvdata_v7,
	}, {
		.name = "s5p-mfc-v8",
		.driver_data = (unsigned long)&mfc_drvdata_v8,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, mfc_driver_ids);

static const struct of_device_id exynos_mfc_match[] = {
	{
		.compatible = "samsung,mfc-v5",
		.data = &mfc_drvdata_v5,
	}, {
		.compatible = "samsung,mfc-v6",
		.data = &mfc_drvdata_v6,
	}, {
		.compatible = "samsung,mfc-v7",
		.data = &mfc_drvdata_v7,
	}, {
		.compatible = "samsung,mfc-v8",
		.data = &mfc_drvdata_v8,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_mfc_match);

static void *mfc_get_drv_data(struct platform_device *pdev)
{
	struct s5p_mfc_variant *driver_data = NULL;

	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(exynos_mfc_match,
				pdev->dev.of_node);
		if (match)
			driver_data = (struct s5p_mfc_variant *)match->data;
	} else {
		driver_data = (struct s5p_mfc_variant *)
			platform_get_device_id(pdev)->driver_data;
	}
	return driver_data;
}

static struct platform_driver s5p_mfc_driver = {
	.probe		= s5p_mfc_probe,
	.remove		= s5p_mfc_remove,
	.id_table	= mfc_driver_ids,
	.driver	= {
		.name	= S5P_MFC_NAME,
		.owner	= THIS_MODULE,
		.pm	= &s5p_mfc_pm_ops,
		.of_match_table = exynos_mfc_match,
	},
};

module_platform_driver(s5p_mfc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");