/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
 *          Fabien Dessenne <fabien.dessenne@st.com>
 *          for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */
10 #include <linux/dma-mapping.h>
13 #include "sti_layer.h"
/* Bits of the GDP control word (gam_gdp_ctl) */
#define ENA_COLOR_FILL  BIT(8)
#define WAIT_NEXT_VSYNC BIT(31)

/* GDP color formats (hardware format codes programmed in gam_gdp_ctl) */
#define GDP_RGB565      0x00
#define GDP_RGB888      0x01
#define GDP_RGB888_32   0x02
#define GDP_ARGB8565    0x04
#define GDP_ARGB8888    0x05
#define GDP_ARGB1555    0x06
#define GDP_ARGB4444    0x07
#define GDP_CLUT8       0x0B
#define GDP_YCBR888     0x10
#define GDP_YCBR422R    0x12
#define GDP_AYCBR8888   0x15

/* Offsets of the GDP registers, relative to the layer register base */
#define GAM_GDP_CTL_OFFSET      0x00
#define GAM_GDP_AGC_OFFSET      0x04
#define GAM_GDP_VPO_OFFSET      0x0C
#define GAM_GDP_VPS_OFFSET      0x10
#define GAM_GDP_PML_OFFSET      0x14
#define GAM_GDP_PMP_OFFSET      0x18
#define GAM_GDP_SIZE_OFFSET     0x1C
#define GAM_GDP_NVN_OFFSET      0x24
#define GAM_GDP_KEY1_OFFSET     0x28
#define GAM_GDP_KEY2_OFFSET     0x2C
#define GAM_GDP_PPT_OFFSET      0x34
#define GAM_GDP_CML_OFFSET      0x3C
#define GAM_GDP_MST_OFFSET      0x68

#define GAM_GDP_ALPHARANGE_255  BIT(5)
#define GAM_GDP_AGC_FULL_RANGE  0x00808080
#define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
/* src_w / src_h are clamped to this maximum (11-bit size fields) */
#define GAM_GDP_SIZE_MAX        0x7FF

/* Two banks of nodes, each bank holding one top-field and one bottom-field node */
#define GDP_NODE_NB_BANK        2
#define GDP_NODE_PER_FIELD      2
/*
 * struct sti_gdp_node_list - one bank of GDP nodes
 * @top_field: node used when displaying the top field (or the whole
 *             progressive frame)
 * @btm_field: node used when displaying the bottom field
 */
struct sti_gdp_node_list {
	struct sti_gdp_node *top_field;
	struct sti_gdp_node *btm_field;
};
81 * @layer: layer structure
82 * @clk_pix: pixel clock for the current gdp
83 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
84 * @is_curr_top: true if the current node processed is the top field
85 * @node_list: array of node list
88 struct sti_layer layer
;
90 struct notifier_block vtg_field_nb
;
92 struct sti_gdp_node_list node_list
[GDP_NODE_NB_BANK
];
95 #define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
97 static const uint32_t gdp_supported_formats
[] = {
110 static const uint32_t *sti_gdp_get_formats(struct sti_layer
*layer
)
112 return gdp_supported_formats
;
115 static unsigned int sti_gdp_get_nb_formats(struct sti_layer
*layer
)
117 return ARRAY_SIZE(gdp_supported_formats
);
120 static int sti_gdp_fourcc2format(int fourcc
)
123 case DRM_FORMAT_XRGB8888
:
124 return GDP_RGB888_32
;
125 case DRM_FORMAT_ARGB8888
:
127 case DRM_FORMAT_ARGB4444
:
129 case DRM_FORMAT_ARGB1555
:
131 case DRM_FORMAT_RGB565
:
133 case DRM_FORMAT_RGB888
:
135 case DRM_FORMAT_AYUV
:
136 return GDP_AYCBR8888
;
137 case DRM_FORMAT_YUV444
:
139 case DRM_FORMAT_VYUY
:
147 static int sti_gdp_get_alpharange(int format
)
153 return GAM_GDP_ALPHARANGE_255
;
159 * sti_gdp_get_free_nodes
162 * Look for a GDP node list that is not currently read by the HW.
165 * Pointer to the free GDP node list
167 static struct sti_gdp_node_list
*sti_gdp_get_free_nodes(struct sti_layer
*layer
)
171 struct sti_gdp
*gdp
= to_sti_gdp(layer
);
174 hw_nvn
= readl(layer
->regs
+ GAM_GDP_NVN_OFFSET
);
178 virt_nvn
= dma_to_virt(layer
->dev
, (dma_addr_t
) hw_nvn
);
180 for (i
= 0; i
< GDP_NODE_NB_BANK
; i
++)
181 if ((virt_nvn
!= gdp
->node_list
[i
].btm_field
) &&
182 (virt_nvn
!= gdp
->node_list
[i
].top_field
))
183 return &gdp
->node_list
[i
];
186 return &gdp
->node_list
[0];
190 * sti_gdp_get_current_nodes
193 * Look for GDP nodes that are currently read by the HW.
196 * Pointer to the current GDP node list
199 struct sti_gdp_node_list
*sti_gdp_get_current_nodes(struct sti_layer
*layer
)
203 struct sti_gdp
*gdp
= to_sti_gdp(layer
);
206 hw_nvn
= readl(layer
->regs
+ GAM_GDP_NVN_OFFSET
);
210 virt_nvn
= dma_to_virt(layer
->dev
, (dma_addr_t
) hw_nvn
);
212 for (i
= 0; i
< GDP_NODE_NB_BANK
; i
++)
213 if ((virt_nvn
== gdp
->node_list
[i
].btm_field
) ||
214 (virt_nvn
== gdp
->node_list
[i
].top_field
))
215 return &gdp
->node_list
[i
];
222 * sti_gdp_prepare_layer
224 * @first_prepare: true if it is the first time this function is called
226 * Update the free GDP node list according to the layer properties.
231 static int sti_gdp_prepare_layer(struct sti_layer
*layer
, bool first_prepare
)
233 struct sti_gdp_node_list
*list
;
234 struct sti_gdp_node
*top_field
, *btm_field
;
235 struct drm_display_mode
*mode
= layer
->mode
;
236 struct device
*dev
= layer
->dev
;
237 struct sti_gdp
*gdp
= to_sti_gdp(layer
);
239 unsigned int depth
, bpp
;
240 int rate
= mode
->clock
* 1000;
242 u32 ydo
, xdo
, yds
, xds
;
244 list
= sti_gdp_get_free_nodes(layer
);
245 top_field
= list
->top_field
;
246 btm_field
= list
->btm_field
;
248 /* Build the top field from layer params */
249 top_field
->gam_gdp_agc
= GAM_GDP_AGC_FULL_RANGE
;
250 top_field
->gam_gdp_ctl
= WAIT_NEXT_VSYNC
;
251 format
= sti_gdp_fourcc2format(layer
->format
);
253 DRM_ERROR("Format not supported by GDP %.4s\n",
254 (char *)&layer
->format
);
257 top_field
->gam_gdp_ctl
|= format
;
258 top_field
->gam_gdp_ctl
|= sti_gdp_get_alpharange(format
);
259 top_field
->gam_gdp_ppt
&= ~GAM_GDP_PPT_IGNORE
;
261 /* pixel memory location */
262 drm_fb_get_bpp_depth(layer
->format
, &depth
, &bpp
);
263 top_field
->gam_gdp_pml
= (u32
) layer
->paddr
+ layer
->offsets
[0];
264 top_field
->gam_gdp_pml
+= layer
->src_x
* (bpp
>> 3);
265 top_field
->gam_gdp_pml
+= layer
->src_y
* layer
->pitches
[0];
267 /* input parameters */
268 top_field
->gam_gdp_pmp
= layer
->pitches
[0];
269 top_field
->gam_gdp_size
=
270 clamp_val(layer
->src_h
, 0, GAM_GDP_SIZE_MAX
) << 16 |
271 clamp_val(layer
->src_w
, 0, GAM_GDP_SIZE_MAX
);
273 /* output parameters */
274 ydo
= sti_vtg_get_line_number(*mode
, layer
->dst_y
);
275 yds
= sti_vtg_get_line_number(*mode
, layer
->dst_y
+ layer
->dst_h
- 1);
276 xdo
= sti_vtg_get_pixel_number(*mode
, layer
->dst_x
);
277 xds
= sti_vtg_get_pixel_number(*mode
, layer
->dst_x
+ layer
->dst_w
- 1);
278 top_field
->gam_gdp_vpo
= (ydo
<< 16) | xdo
;
279 top_field
->gam_gdp_vps
= (yds
<< 16) | xds
;
281 /* Same content and chained together */
282 memcpy(btm_field
, top_field
, sizeof(*btm_field
));
283 top_field
->gam_gdp_nvn
= virt_to_dma(dev
, btm_field
);
284 btm_field
->gam_gdp_nvn
= virt_to_dma(dev
, top_field
);
286 /* Interlaced mode */
287 if (layer
->mode
->flags
& DRM_MODE_FLAG_INTERLACE
)
288 btm_field
->gam_gdp_pml
= top_field
->gam_gdp_pml
+
292 /* Set and enable gdp clock */
294 res
= clk_set_rate(gdp
->clk_pix
, rate
);
296 DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
301 if (clk_prepare_enable(gdp
->clk_pix
)) {
302 DRM_ERROR("Failed to prepare/enable gdp\n");
312 * sti_gdp_commit_layer
315 * Update the NVN field of the 'right' field of the current GDP node (being
316 * used by the HW) with the address of the updated ('free') top field GDP node.
317 * - In interlaced mode the 'right' field is the bottom field as we update
318 * frames starting from their top field
319 * - In progressive mode, we update both bottom and top fields which are
321 * At the next VSYNC, the updated node list will be used by the HW.
326 static int sti_gdp_commit_layer(struct sti_layer
*layer
)
328 struct sti_gdp_node_list
*updated_list
= sti_gdp_get_free_nodes(layer
);
329 struct sti_gdp_node
*updated_top_node
= updated_list
->top_field
;
330 struct sti_gdp_node
*updated_btm_node
= updated_list
->btm_field
;
331 struct sti_gdp
*gdp
= to_sti_gdp(layer
);
332 u32 dma_updated_top
= virt_to_dma(layer
->dev
, updated_top_node
);
333 u32 dma_updated_btm
= virt_to_dma(layer
->dev
, updated_btm_node
);
334 struct sti_gdp_node_list
*curr_list
= sti_gdp_get_current_nodes(layer
);
336 dev_dbg(layer
->dev
, "Current NVN:0x%X\n",
337 readl(layer
->regs
+ GAM_GDP_NVN_OFFSET
));
338 dev_dbg(layer
->dev
, "Posted buff: %lx current buff: %x\n",
339 (unsigned long)layer
->paddr
,
340 readl(layer
->regs
+ GAM_GDP_PML_OFFSET
));
342 if (curr_list
== NULL
) {
343 /* First update or invalid node should directly write in the
345 writel(gdp
->is_curr_top
== true ?
346 dma_updated_btm
: dma_updated_top
,
347 layer
->regs
+ GAM_GDP_NVN_OFFSET
);
351 if (layer
->mode
->flags
& DRM_MODE_FLAG_INTERLACE
) {
352 if (gdp
->is_curr_top
== true) {
353 /* Do not update in the middle of the frame, but
354 * postpone the update after the bottom field has
356 curr_list
->btm_field
->gam_gdp_nvn
= dma_updated_top
;
358 /* Direct update to avoid one frame delay */
359 writel(dma_updated_top
,
360 layer
->regs
+ GAM_GDP_NVN_OFFSET
);
363 /* Direct update for progressive to avoid one frame delay */
364 writel(dma_updated_top
, layer
->regs
+ GAM_GDP_NVN_OFFSET
);
371 * sti_gdp_disable_layer
379 static int sti_gdp_disable_layer(struct sti_layer
*layer
)
382 struct sti_gdp
*gdp
= to_sti_gdp(layer
);
384 /* Set the nodes as 'to be ignored on mixer' */
385 for (i
= 0; i
< GDP_NODE_NB_BANK
; i
++) {
386 gdp
->node_list
[i
].top_field
->gam_gdp_ppt
|= GAM_GDP_PPT_IGNORE
;
387 gdp
->node_list
[i
].btm_field
->gam_gdp_ppt
|= GAM_GDP_PPT_IGNORE
;
391 clk_disable_unprepare(gdp
->clk_pix
);
398 * @nb: notifier block
399 * @event: event message
400 * @data: private data
402 * Handle VTG top field and bottom field event.
407 int sti_gdp_field_cb(struct notifier_block
*nb
,
408 unsigned long event
, void *data
)
410 struct sti_gdp
*gdp
= container_of(nb
, struct sti_gdp
, vtg_field_nb
);
413 case VTG_TOP_FIELD_EVENT
:
414 gdp
->is_curr_top
= true;
416 case VTG_BOTTOM_FIELD_EVENT
:
417 gdp
->is_curr_top
= false;
420 DRM_ERROR("unsupported event: %lu\n", event
);
427 static void sti_gdp_init(struct sti_layer
*layer
)
429 struct sti_gdp
*gdp
= to_sti_gdp(layer
);
430 struct device_node
*np
= layer
->dev
->of_node
;
433 unsigned int i
, size
;
435 /* Allocate all the nodes within a single memory page */
436 size
= sizeof(struct sti_gdp_node
) *
437 GDP_NODE_PER_FIELD
* GDP_NODE_NB_BANK
;
439 base
= dma_alloc_writecombine(layer
->dev
,
440 size
, &dma
, GFP_KERNEL
| GFP_DMA
);
442 DRM_ERROR("Failed to allocate memory for GDP node\n");
445 memset(base
, 0, size
);
447 for (i
= 0; i
< GDP_NODE_NB_BANK
; i
++) {
448 if (virt_to_dma(layer
->dev
, base
) & 0xF) {
449 DRM_ERROR("Mem alignment failed\n");
452 gdp
->node_list
[i
].top_field
= base
;
453 DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i
, base
);
454 base
+= sizeof(struct sti_gdp_node
);
456 if (virt_to_dma(layer
->dev
, base
) & 0xF) {
457 DRM_ERROR("Mem alignment failed\n");
460 gdp
->node_list
[i
].btm_field
= base
;
461 DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i
, base
);
462 base
+= sizeof(struct sti_gdp_node
);
465 if (of_device_is_compatible(np
, "st,stih407-compositor")) {
466 /* GDP of STiH407 chip have its own pixel clock */
469 switch (layer
->desc
) {
471 clk_name
= "pix_gdp1";
474 clk_name
= "pix_gdp2";
477 clk_name
= "pix_gdp3";
480 clk_name
= "pix_gdp4";
483 DRM_ERROR("GDP id not recognized\n");
487 gdp
->clk_pix
= devm_clk_get(layer
->dev
, clk_name
);
488 if (IS_ERR(gdp
->clk_pix
))
489 DRM_ERROR("Cannot get %s clock\n", clk_name
);
493 static const struct sti_layer_funcs gdp_ops
= {
494 .get_formats
= sti_gdp_get_formats
,
495 .get_nb_formats
= sti_gdp_get_nb_formats
,
496 .init
= sti_gdp_init
,
497 .prepare
= sti_gdp_prepare_layer
,
498 .commit
= sti_gdp_commit_layer
,
499 .disable
= sti_gdp_disable_layer
,
502 struct sti_layer
*sti_gdp_create(struct device
*dev
, int id
)
506 gdp
= devm_kzalloc(dev
, sizeof(*gdp
), GFP_KERNEL
);
508 DRM_ERROR("Failed to allocate memory for GDP\n");
512 gdp
->layer
.ops
= &gdp_ops
;
513 gdp
->vtg_field_nb
.notifier_call
= sti_gdp_field_cb
;
515 return (struct sti_layer
*)gdp
;