drm/vc4: Add support for a few more RGB display plane formats.
drivers/gpu/drm/vc4/vc4_plane.c
/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 plane module
 *
 * Each DRM plane is a layer of pixels being scanned out by the HVS.
 *
 * At atomic modeset check time, we compute the HVS display element
 * state that would be necessary for displaying the plane (giving us a
 * chance to figure out if a plane configuration is invalid), then at
 * atomic flush time the CRTC will ask us to write our element state
 * into the region of the HVS that it has allocated for us.
 */

#include "vc4_drv.h"
#include "vc4_regs.h"
#include "drm_atomic_helper.h"
#include "drm_fb_cma_helper.h"
#include "drm_plane_helper.h"

enum vc4_scaling_mode {
        VC4_SCALING_NONE,
        VC4_SCALING_TPZ,
        VC4_SCALING_PPF,
};

struct vc4_plane_state {
        struct drm_plane_state base;
        /* System memory copy of the display list for this element, computed
         * at atomic_check time.
         */
        u32 *dlist;
        u32 dlist_size; /* Number of dwords allocated for the display list */
        u32 dlist_count; /* Number of used dwords in the display list. */

        /* Offset in the dlist to various words, for pageflip or
         * cursor updates.
         */
        u32 pos0_offset;
        u32 pos2_offset;
        u32 ptr0_offset;

        /* Offset where the plane's dlist was last stored in the
         * hardware at vc4_crtc_atomic_flush() time.
         */
        u32 __iomem *hw_dlist;

        /* Clipped coordinates of the plane on the display. */
        int crtc_x, crtc_y, crtc_w, crtc_h;
        /* Clipped area being scanned from in the FB. */
        u32 src_x, src_y, src_w, src_h;

        enum vc4_scaling_mode x_scaling, y_scaling;
        bool is_unity;

        /* Offset to start scanning out from the start of the plane's
         * BO.
         */
        u32 offset;

        /* Our allocation in LBM for temporary storage during scaling. */
        struct drm_mm_node lbm;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
        return (struct vc4_plane_state *)state;
}

static const struct hvs_format {
        u32 drm; /* DRM_FORMAT_* */
        u32 hvs; /* HVS_FORMAT_* */
        u32 pixel_order;
        bool has_alpha;
} hvs_formats[] = {
        {
                .drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
                .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
        },
        {
                .drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
                .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
        },
        {
                .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
                .pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
        },
        {
                .drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565,
                .pixel_order = HVS_PIXEL_ORDER_XBGR, .has_alpha = false,
        },
        {
                .drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
                .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
        },
        {
                .drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
                .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
        },
};

static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
{
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
                if (hvs_formats[i].drm == drm_format)
                        return &hvs_formats[i];
        }

        return NULL;
}

static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
        if (dst > src)
                return VC4_SCALING_PPF;
        else if (dst < src)
                return VC4_SCALING_TPZ;
        else
                return VC4_SCALING_NONE;
}

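/* Illustrative examples of the selection above (numbers are made up,
 * not from the spec): a 640-pixel-wide source shown at 1280 pixels is
 * an upscale and selects PPF, a 1920-pixel source shown at 1280
 * pixels is a downscale and selects TPZ, and a 1:1 blit selects
 * VC4_SCALING_NONE.
 */
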
static bool plane_enabled(struct drm_plane_state *state)
{
        return state->fb && state->crtc;
}

static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
        struct vc4_plane_state *vc4_state;

        if (WARN_ON(!plane->state))
                return NULL;

        vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
        if (!vc4_state)
                return NULL;

        memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));

        __drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);

        if (vc4_state->dlist) {
                vc4_state->dlist = kmemdup(vc4_state->dlist,
                                           vc4_state->dlist_count * 4,
                                           GFP_KERNEL);
                if (!vc4_state->dlist) {
                        kfree(vc4_state);
                        return NULL;
                }
                vc4_state->dlist_size = vc4_state->dlist_count;
        }

        return &vc4_state->base;
}

static void vc4_plane_destroy_state(struct drm_plane *plane,
                                    struct drm_plane_state *state)
{
        struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

        if (vc4_state->lbm.allocated) {
                unsigned long irqflags;

                spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
                drm_mm_remove_node(&vc4_state->lbm);
                spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
        }

        kfree(vc4_state->dlist);
        __drm_atomic_helper_plane_destroy_state(plane, &vc4_state->base);
        kfree(state);
}

/* Called during init to allocate the plane's atomic state. */
static void vc4_plane_reset(struct drm_plane *plane)
{
        struct vc4_plane_state *vc4_state;

        WARN_ON(plane->state);

        vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
        if (!vc4_state)
                return;

        plane->state = &vc4_state->base;
        vc4_state->base.plane = plane;
}

static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
{
        if (vc4_state->dlist_count == vc4_state->dlist_size) {
                u32 new_size = max(4u, vc4_state->dlist_count * 2);
                u32 *new_dlist = kmalloc(new_size * 4, GFP_KERNEL);

                if (!new_dlist)
                        return;
                memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);

                kfree(vc4_state->dlist);
                vc4_state->dlist = new_dlist;
                vc4_state->dlist_size = new_size;
        }

        vc4_state->dlist[vc4_state->dlist_count++] = val;
}

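/* Illustrative note, derived from the code above rather than from the
 * HVS documentation: the backing store grows on demand (4, then 8,
 * then 16 dwords, and so on), and a simple unscaled RGB plane ends up
 * appending seven dwords in vc4_plane_mode_set() below.
 */
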
/* Returns the scl0/scl1 field based on whether the dimensions need to
 * be up/down/non-scaled.
 *
 * This is a replication of a table from the spec.
 */
static u32 vc4_get_scl_field(struct drm_plane_state *state)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

        switch (vc4_state->x_scaling << 2 | vc4_state->y_scaling) {
        case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
                return SCALER_CTL0_SCL_H_PPF_V_PPF;
        case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
                return SCALER_CTL0_SCL_H_TPZ_V_PPF;
        case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
                return SCALER_CTL0_SCL_H_PPF_V_TPZ;
        case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
                return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
        case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
                return SCALER_CTL0_SCL_H_PPF_V_NONE;
        case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
                return SCALER_CTL0_SCL_H_NONE_V_PPF;
        case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
                return SCALER_CTL0_SCL_H_NONE_V_TPZ;
        case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
                return SCALER_CTL0_SCL_H_TPZ_V_NONE;
        default:
        case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
                /* The unity case is independently handled by
                 * SCALER_CTL0_UNITY.
                 */
                return 0;
        }
}

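/* Worked example of the switch key above (illustrative): horizontal
 * TPZ (1) and vertical PPF (2) encode as (1 << 2) | 2 == 6, which
 * matches the VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF case and selects
 * SCALER_CTL0_SCL_H_TPZ_V_PPF.
 */
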
static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
        struct drm_plane *plane = state->plane;
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
        struct drm_framebuffer *fb = state->fb;
        u32 subpixel_src_mask = (1 << 16) - 1;

        vc4_state->offset = fb->offsets[0];

        /* We don't support subpixel source positioning for scaling. */
        if ((state->src_x & subpixel_src_mask) ||
            (state->src_y & subpixel_src_mask) ||
            (state->src_w & subpixel_src_mask) ||
            (state->src_h & subpixel_src_mask)) {
                return -EINVAL;
        }

        vc4_state->src_x = state->src_x >> 16;
        vc4_state->src_y = state->src_y >> 16;
        vc4_state->src_w = state->src_w >> 16;
        vc4_state->src_h = state->src_h >> 16;

        vc4_state->crtc_x = state->crtc_x;
        vc4_state->crtc_y = state->crtc_y;
        vc4_state->crtc_w = state->crtc_w;
        vc4_state->crtc_h = state->crtc_h;

        vc4_state->x_scaling = vc4_get_scaling_mode(vc4_state->src_w,
                                                    vc4_state->crtc_w);
        vc4_state->y_scaling = vc4_get_scaling_mode(vc4_state->src_h,
                                                    vc4_state->crtc_h);
        vc4_state->is_unity = (vc4_state->x_scaling == VC4_SCALING_NONE &&
                               vc4_state->y_scaling == VC4_SCALING_NONE);

        /* No configuring scaling on the cursor plane, since it gets
         * non-vblank-synced updates, and scaling requires LBM changes
         * which have to be vblank-synced.
         */
        if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity)
                return -EINVAL;

        /* Clamp the on-screen start x/y to 0. The hardware doesn't
         * support negative y, and negative x wastes bandwidth.
         */
        if (vc4_state->crtc_x < 0) {
                vc4_state->offset += (drm_format_plane_cpp(fb->pixel_format,
                                                           0) *
                                      -vc4_state->crtc_x);
                vc4_state->src_w += vc4_state->crtc_x;
                vc4_state->crtc_x = 0;
        }

        if (vc4_state->crtc_y < 0) {
                vc4_state->offset += fb->pitches[0] * -vc4_state->crtc_y;
                vc4_state->src_h += vc4_state->crtc_y;
                vc4_state->crtc_y = 0;
        }

        return 0;
}

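/* Illustrative example of the clamping above (numbers are made up):
 * an XRGB8888 framebuffer (4 bytes per pixel) placed at crtc_x = -16
 * has its scanout offset advanced by 64 bytes, its src_w reduced by
 * 16 pixels, and crtc_x clamped to 0, so only the visible part of the
 * plane is fetched.
 */
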
static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
        u32 scale, recip;

        scale = (1 << 16) * src / dst;

        /* The specs note that while the reciprocal would be defined
         * as (1<<32)/scale, ~0 is close enough.
         */
        recip = ~0 / scale;

        vc4_dlist_write(vc4_state,
                        VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
                        VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
        vc4_dlist_write(vc4_state,
                        VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
}

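/* Worked example of the 16.16 fixed-point math above (illustrative):
 * downscaling src = 1280 to dst = 640 gives scale = 0x20000 (2.0),
 * and recip = ~0 / 0x20000 = 0x7fff, one less than the exact
 * (1 << 32) / scale = 0x8000, which is what the "close enough"
 * comment refers to.
 */
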
static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
        u32 scale = (1 << 16) * src / dst;

        vc4_dlist_write(vc4_state,
                        SCALER_PPF_AGC |
                        VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
                        VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
}

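/* Illustrative: upscaling src = 640 to dst = 1280 gives a PPF scale
 * of 0x8000, i.e. 0.5 source pixels per destination pixel in 16.16
 * fixed point.
 */
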
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
        /* This is the worst case number. One of the two sizes will
         * be used depending on the scaling configuration.
         */
        u32 pix_per_line = max(vc4_state->src_w, (u32)vc4_state->crtc_w);
        u32 lbm;

        if (vc4_state->is_unity)
                return 0;
        else if (vc4_state->y_scaling == VC4_SCALING_TPZ)
                lbm = pix_per_line * 8;
        else {
                /* In special cases, this multiplier might be 12. */
                lbm = pix_per_line * 16;
        }

        lbm = roundup(lbm, 32);

        return lbm;
}

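/* Illustrative sizing example (numbers made up; the code does not say
 * whether LBM is counted in bytes or words): a 1920-pixel-wide line
 * with vertical TPZ reserves 1920 * 8 = 15360 LBM units, while the
 * PPF/worst case reserves 1920 * 16 = 30720; both are already
 * multiples of 32, so roundup() leaves them unchanged.
 */
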
static void vc4_write_scaling_parameters(struct drm_plane_state *state)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

        /* Ch0 H-PPF Word 0: Scaling Parameters */
        if (vc4_state->x_scaling == VC4_SCALING_PPF) {
                vc4_write_ppf(vc4_state,
                              vc4_state->src_w, vc4_state->crtc_w);
        }

        /* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
        if (vc4_state->y_scaling == VC4_SCALING_PPF) {
                vc4_write_ppf(vc4_state,
                              vc4_state->src_h, vc4_state->crtc_h);
                vc4_dlist_write(vc4_state, 0xc0c0c0c0);
        }

        /* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
        if (vc4_state->x_scaling == VC4_SCALING_TPZ) {
                vc4_write_tpz(vc4_state,
                              vc4_state->src_w, vc4_state->crtc_w);
        }

        /* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
        if (vc4_state->y_scaling == VC4_SCALING_TPZ) {
                vc4_write_tpz(vc4_state,
                              vc4_state->src_h, vc4_state->crtc_h);
                vc4_dlist_write(vc4_state, 0xc0c0c0c0);
        }
}

/* Writes out a full display list for an active plane to the plane's
 * private dlist state.
 */
static int vc4_plane_mode_set(struct drm_plane *plane,
                              struct drm_plane_state *state)
{
        struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
        struct drm_framebuffer *fb = state->fb;
        struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
        u32 ctl0_offset = vc4_state->dlist_count;
        const struct hvs_format *format = vc4_get_hvs_format(fb->pixel_format);
        u32 scl;
        u32 lbm_size;
        unsigned long irqflags;
        int ret;

        ret = vc4_plane_setup_clipping_and_scaling(state);
        if (ret)
                return ret;

        /* Allocate the LBM memory that the HVS will use for temporary
         * storage due to our scaling/format conversion.
         */
        lbm_size = vc4_lbm_size(state);
        if (lbm_size) {
                if (!vc4_state->lbm.allocated) {
                        spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
                        ret = drm_mm_insert_node(&vc4->hvs->lbm_mm,
                                                 &vc4_state->lbm,
                                                 lbm_size, 32, 0);
                        spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
                } else {
                        WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
                }
        }

        if (ret)
                return ret;

        scl = vc4_get_scl_field(state);

        /* Control word */
        vc4_dlist_write(vc4_state,
                        SCALER_CTL0_VALID |
                        (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
                        (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
                        (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
                        VC4_SET_FIELD(scl, SCALER_CTL0_SCL0) |
                        VC4_SET_FIELD(scl, SCALER_CTL0_SCL1));

        /* Position Word 0: Image Positions and Alpha Value */
        vc4_state->pos0_offset = vc4_state->dlist_count;
        vc4_dlist_write(vc4_state,
                        VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) |
                        VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
                        VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));

        /* Position Word 1: Scaled Image Dimensions. */
        if (!vc4_state->is_unity) {
                vc4_dlist_write(vc4_state,
                                VC4_SET_FIELD(vc4_state->crtc_w,
                                              SCALER_POS1_SCL_WIDTH) |
                                VC4_SET_FIELD(vc4_state->crtc_h,
                                              SCALER_POS1_SCL_HEIGHT));
        }

        /* Position Word 2: Source Image Size, Alpha Mode */
        vc4_state->pos2_offset = vc4_state->dlist_count;
        vc4_dlist_write(vc4_state,
                        VC4_SET_FIELD(format->has_alpha ?
                                      SCALER_POS2_ALPHA_MODE_PIPELINE :
                                      SCALER_POS2_ALPHA_MODE_FIXED,
                                      SCALER_POS2_ALPHA_MODE) |
                        VC4_SET_FIELD(vc4_state->src_w, SCALER_POS2_WIDTH) |
                        VC4_SET_FIELD(vc4_state->src_h, SCALER_POS2_HEIGHT));

        /* Position Word 3: Context. Written by the HVS. */
        vc4_dlist_write(vc4_state, 0xc0c0c0c0);

        /* Pointer Word 0: RGB / Y Pointer */
        vc4_state->ptr0_offset = vc4_state->dlist_count;
        vc4_dlist_write(vc4_state, bo->paddr + vc4_state->offset);

        /* Pointer Context Word 0: Written by the HVS */
        vc4_dlist_write(vc4_state, 0xc0c0c0c0);

        /* Pitch word 0: Pointer 0 Pitch */
        vc4_dlist_write(vc4_state,
                        VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH));

        if (!vc4_state->is_unity) {
                /* LBM Base Address. */
                if (vc4_state->y_scaling != VC4_SCALING_NONE)
                        vc4_dlist_write(vc4_state, vc4_state->lbm.start);

                vc4_write_scaling_parameters(state);

                /* If any PPF setup was done, then all the kernel
                 * pointers get uploaded.
                 */
                if (vc4_state->x_scaling == VC4_SCALING_PPF ||
                    vc4_state->y_scaling == VC4_SCALING_PPF) {
                        u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
                                                   SCALER_PPF_KERNEL_OFFSET);

                        /* HPPF plane 0 */
                        vc4_dlist_write(vc4_state, kernel);
                        /* VPPF plane 0 */
                        vc4_dlist_write(vc4_state, kernel);
                        /* HPPF plane 1 */
                        vc4_dlist_write(vc4_state, kernel);
                        /* VPPF plane 1 */
                        vc4_dlist_write(vc4_state, kernel);
                }
        }

        vc4_state->dlist[ctl0_offset] |=
                VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);

        return 0;
}

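/* Sketch of the resulting display list, derived from the code above
 * (not an excerpt from the HVS documentation): for an unscaled plane
 * the seven words are CTL0, POS0, POS2, POS3 (context), PTR0, the
 * pointer context word, and the pitch word. A scaled plane adds POS1,
 * the LBM base address (when vertical scaling is used), the PPF/TPZ
 * parameter words, and four filter-kernel pointers when PPF is in
 * use; CTL0's SIZE field is then patched with the final word count.
 */
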
/* If a modeset involves changing the setup of a plane, the atomic
 * infrastructure will call this to validate a proposed plane setup.
 * However, if a plane isn't getting updated, this (and the
 * corresponding vc4_plane_atomic_update) won't get called. Thus, we
 * compute the dlist here and have all active plane dlists get updated
 * in the CRTC's flush.
 */
static int vc4_plane_atomic_check(struct drm_plane *plane,
                                  struct drm_plane_state *state)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

        vc4_state->dlist_count = 0;

        if (plane_enabled(state))
                return vc4_plane_mode_set(plane, state);
        else
                return 0;
}

static void vc4_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_plane_state *old_state)
{
        /* No contents here. Since we don't know where in the CRTC's
         * dlist we should be stored, our dlist is uploaded to the
         * hardware with vc4_plane_write_dlist() at CRTC atomic_flush
         * time.
         */
}

u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
        int i;

        vc4_state->hw_dlist = dlist;

        /* Can't memcpy_toio() because it needs to be 32-bit writes. */
        for (i = 0; i < vc4_state->dlist_count; i++)
                writel(vc4_state->dlist[i], &dlist[i]);

        return vc4_state->dlist_count;
}

u32 vc4_plane_dlist_size(struct drm_plane_state *state)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

        return vc4_state->dlist_count;
}

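/* Expected call pattern, inferred from the comments above (the CRTC
 * side lives in vc4_crtc.c and is not shown here): at atomic_flush
 * time the CRTC sizes its HVS allocation using vc4_plane_dlist_size()
 * for each active plane, then hands each plane a pointer into that
 * region via vc4_plane_write_dlist(), which also records hw_dlist for
 * the async cursor and pageflip updates below.
 */
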
/* Updates the plane to immediately (well, once the FIFO needs
 * refilling) scan out from a new framebuffer.
 */
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
        struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
        uint32_t addr;

        /* We're skipping the address adjustment for negative origin,
         * because this is only called on the primary plane.
         */
        WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
        addr = bo->paddr + fb->offsets[0];

        /* Write the new address into the hardware immediately. The
         * scanout will start from this address as soon as the FIFO
         * needs to refill with pixels.
         */
        writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

        /* Also update the CPU-side dlist copy, so that any later
         * atomic updates that don't do a new modeset on our plane
         * also use our updated address.
         */
        vc4_state->dlist[vc4_state->ptr0_offset] = addr;
}

static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
        .prepare_fb = NULL,
        .cleanup_fb = NULL,
        .atomic_check = vc4_plane_atomic_check,
        .atomic_update = vc4_plane_atomic_update,
};

static void vc4_plane_destroy(struct drm_plane *plane)
{
        drm_plane_helper_disable(plane);
        drm_plane_cleanup(plane);
}

/* Implements immediate (non-vblank-synced) updates of the cursor
 * position, or falls back to the atomic helper otherwise.
 */
static int
vc4_update_plane(struct drm_plane *plane,
                 struct drm_crtc *crtc,
                 struct drm_framebuffer *fb,
                 int crtc_x, int crtc_y,
                 unsigned int crtc_w, unsigned int crtc_h,
                 uint32_t src_x, uint32_t src_y,
                 uint32_t src_w, uint32_t src_h)
{
        struct drm_plane_state *plane_state;
        struct vc4_plane_state *vc4_state;

        if (plane != crtc->cursor)
                goto out;

        plane_state = plane->state;
        vc4_state = to_vc4_plane_state(plane_state);

        if (!plane_state)
                goto out;

        /* If we're changing the cursor contents, do that in the
         * normal vblank-synced atomic path.
         */
        if (fb != plane_state->fb)
                goto out;

        /* No configuring new scaling in the fast path. */
        if (crtc_w != plane_state->crtc_w ||
            crtc_h != plane_state->crtc_h ||
            src_w != plane_state->src_w ||
            src_h != plane_state->src_h) {
                goto out;
        }

        /* Set the cursor's position on the screen. This is the
         * expected change from the drm_mode_cursor_universal()
         * helper.
         */
        plane_state->crtc_x = crtc_x;
        plane_state->crtc_y = crtc_y;

        /* Allow changing the start position within the cursor BO, if
         * that matters.
         */
        plane_state->src_x = src_x;
        plane_state->src_y = src_y;

        /* Update the display list based on the new crtc_x/y. */
        vc4_plane_atomic_check(plane, plane_state);

        /* Note that we can't just call vc4_plane_write_dlist()
         * because that would smash the context data that the HVS is
         * currently using.
         */
        writel(vc4_state->dlist[vc4_state->pos0_offset],
               &vc4_state->hw_dlist[vc4_state->pos0_offset]);
        writel(vc4_state->dlist[vc4_state->pos2_offset],
               &vc4_state->hw_dlist[vc4_state->pos2_offset]);
        writel(vc4_state->dlist[vc4_state->ptr0_offset],
               &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

        return 0;

out:
        return drm_atomic_helper_update_plane(plane, crtc, fb,
                                              crtc_x, crtc_y,
                                              crtc_w, crtc_h,
                                              src_x, src_y,
                                              src_w, src_h);
}

static const struct drm_plane_funcs vc4_plane_funcs = {
        .update_plane = vc4_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = vc4_plane_destroy,
        .set_property = NULL,
        .reset = vc4_plane_reset,
        .atomic_duplicate_state = vc4_plane_duplicate_state,
        .atomic_destroy_state = vc4_plane_destroy_state,
};

struct drm_plane *vc4_plane_init(struct drm_device *dev,
                                 enum drm_plane_type type)
{
        struct drm_plane *plane = NULL;
        struct vc4_plane *vc4_plane;
        u32 formats[ARRAY_SIZE(hvs_formats)];
        int ret = 0;
        unsigned i;

        vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
                                 GFP_KERNEL);
        if (!vc4_plane) {
                ret = -ENOMEM;
                goto fail;
        }

        for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
                formats[i] = hvs_formats[i].drm;
        plane = &vc4_plane->base;
        ret = drm_universal_plane_init(dev, plane, 0xff,
                                       &vc4_plane_funcs,
                                       formats, ARRAY_SIZE(formats),
                                       type, NULL);

        drm_plane_helper_add(plane, &vc4_plane_helper_funcs);

        return plane;
fail:
        if (plane)
                vc4_plane_destroy(plane);

        return ERR_PTR(ret);
}
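
/* Hedged usage sketch of vc4_plane_init(); the real caller lives
 * elsewhere in the driver (e.g. the CRTC setup code) and is not shown
 * here, and "drm" stands for the caller's struct drm_device:
 *
 *      struct drm_plane *primary;
 *
 *      primary = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
 *      if (IS_ERR(primary))
 *              return PTR_ERR(primary);
 *
 * The returned plane is already wired up to the atomic helpers via
 * vc4_plane_funcs and vc4_plane_helper_funcs above.
 */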